Compare commits
1314 Commits
v0.3.2
...
feature/ag
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
a25b173f98 | ||
|
|
d7944a7051 | ||
|
|
67917ea7d3 | ||
|
|
e3287115c5 | ||
|
|
855c1f1b07 | ||
|
|
dc815664c4 | ||
|
|
e08bbb142a | ||
|
|
58e0b166cb | ||
|
|
2a678bb017 | ||
|
|
5664456b77 | ||
|
|
3685b7e57e | ||
|
|
989d5f73b1 | ||
|
|
4f84073cb5 | ||
|
|
c190295c34 | ||
|
|
60875644a1 | ||
|
|
113b09ad01 | ||
|
|
2605d0e671 | ||
|
|
d232b91d31 | ||
|
|
c65db31fd9 | ||
|
|
99871805bd | ||
|
|
e8ef39adad | ||
|
|
466b9217b5 | ||
|
|
c9a7f519b9 | ||
|
|
96ae532879 | ||
|
|
eda08d5b0f | ||
|
|
7c12b58bb5 | ||
|
|
5446c89bc0 | ||
|
|
2d0251e585 | ||
|
|
f41710c892 | ||
|
|
df3f79f282 | ||
|
|
f8df692865 | ||
|
|
0c6d3b188d | ||
|
|
e7a38863ab | ||
|
|
720e0fcdab | ||
|
|
bf8ff84522 | ||
|
|
5a9510238e | ||
|
|
7b3c74179b | ||
|
|
cd70fa4c32 | ||
|
|
83133ced6a | ||
|
|
6c5179a179 | ||
|
|
e33ab39b85 | ||
|
|
9567bcec1b | ||
|
|
550b16dc0b | ||
|
|
5d8331b7f7 | ||
|
|
e35b643e51 | ||
|
|
bc6a92677b | ||
|
|
f52072e6ec | ||
|
|
9c43c43a46 | ||
|
|
0430e0f930 | ||
|
|
b945243d1a | ||
|
|
d8484a8b26 | ||
|
|
3c27499795 | ||
|
|
7c772e873d | ||
|
|
db2fab245e | ||
|
|
a9c9917f1a | ||
|
|
23e2e9e9cc | ||
|
|
2369e92460 | ||
|
|
a53b15f2a3 | ||
|
|
72eb8b1eb6 | ||
|
|
4db54f3b83 | ||
|
|
24eb27f005 | ||
|
|
009d76ea35 | ||
|
|
6e8a425eb1 | ||
|
|
66188d791b | ||
|
|
015ff02d71 | ||
|
|
10bfaf5415 | ||
|
|
e3e0b85e0c | ||
|
|
ad0632892e | ||
|
|
f26791ba39 | ||
|
|
2fbaaebf44 | ||
|
|
edb916338c | ||
|
|
f7e947d37d | ||
|
|
a9e3d1ed75 | ||
|
|
ce97827c42 | ||
|
|
3efec07338 | ||
|
|
68f401bfa3 | ||
|
|
1ea525feaa | ||
|
|
57c4a7527e | ||
|
|
5aa9c045e1 | ||
|
|
6f1900f3bb | ||
|
|
bc62de795e | ||
|
|
c62ca4b183 | ||
|
|
876e5bc683 | ||
|
|
b99f3b73cd | ||
|
|
7eecf29449 | ||
|
|
1d331d7810 | ||
|
|
68414678d8 | ||
|
|
2f6b9dac26 | ||
|
|
d1812d875b | ||
|
|
723dea100f | ||
|
|
c4419ed31f | ||
|
|
754ab86e51 | ||
|
|
04dab532cd | ||
|
|
add01ebc68 | ||
|
|
1cc9a1a30b | ||
|
|
92a1de7500 | ||
|
|
a6fedcff80 | ||
|
|
55eb999305 | ||
|
|
377b7b12ce | ||
|
|
ba2906a42e | ||
|
|
ee27f14be0 | ||
|
|
46c8be63a7 | ||
|
|
7ba66c419a | ||
|
|
340775a593 | ||
|
|
35d2ec8a44 | ||
|
|
2983b9950f | ||
|
|
dbf08a6cf8 | ||
|
|
28f31be36f | ||
|
|
3ec4db0225 | ||
|
|
f5688e077a | ||
|
|
2464d255d5 | ||
|
|
586d950b8c | ||
|
|
e7469388cc | ||
|
|
ab6ca8e16a | ||
|
|
02413a4fac | ||
|
|
05b8dd9ad8 | ||
|
|
29c9419a6e | ||
|
|
90e61989a4 | ||
|
|
b1f9f90fec | ||
|
|
b40849f672 | ||
|
|
44560c8da8 | ||
|
|
46fd01c264 | ||
|
|
100695c262 | ||
|
|
54b5a4ae55 | ||
|
|
ffb252962b | ||
|
|
ae31270e63 | ||
|
|
9b2b54d585 | ||
|
|
e1ccc583a3 | ||
|
|
7750e33f82 | ||
|
|
d2c4741f0b | ||
|
|
c79c4f6bde | ||
|
|
3849d0d1a9 | ||
|
|
8bd71ccd5e | ||
|
|
b731f7fb64 | ||
|
|
cd554f77f3 | ||
|
|
8c977c51ca | ||
|
|
a3252f9671 | ||
|
|
9bc945f76f | ||
|
|
f6b4dfffb6 | ||
|
|
68955c29cb | ||
|
|
97e4d036dc | ||
|
|
0f49f54c29 | ||
|
|
828e13adbb | ||
|
|
e6f0067728 | ||
|
|
5c473eb9cc | ||
|
|
2adf34fbaf | ||
|
|
05dd760388 | ||
|
|
2cf4864078 | ||
|
|
df4c92672f | ||
|
|
5b173315f9 | ||
|
|
c85ea7d8fa | ||
|
|
113154702f | ||
|
|
33ae46f76a | ||
|
|
27272680a2 | ||
|
|
b1621f6b34 | ||
|
|
2c65033c0a | ||
|
|
dcfbaa9243 | ||
|
|
accef65ede | ||
|
|
50755d8ba3 | ||
|
|
47b6509f70 | ||
|
|
89f3fdc05f | ||
|
|
03f8b73627 | ||
|
|
2e6e9635c3 | ||
|
|
6a312e3fdd | ||
|
|
79dbbdf6b4 | ||
|
|
0e8961efe3 | ||
|
|
fc2be42418 | ||
|
|
ab4336cfd7 | ||
|
|
20d3b5288c | ||
|
|
63a29d3a4a | ||
|
|
31856d9895 | ||
|
|
f51dcf23d6 | ||
|
|
6ecaeb4fde | ||
|
|
1883c9666e | ||
|
|
4b4cf76641 | ||
|
|
0016b4bd72 | ||
|
|
495bbecc01 | ||
|
|
e6af7e9885 | ||
|
|
182b8c2283 | ||
|
|
5318cccc5f | ||
|
|
99739575d4 | ||
|
|
b8ff331ccc | ||
|
|
9e63f3f7c6 | ||
|
|
6f9069a4fb | ||
|
|
a18ab7f1e9 | ||
|
|
05162ca350 | ||
|
|
be0371fb11 | ||
|
|
fa3329abf2 | ||
|
|
e830fade06 | ||
|
|
ac392dcb96 | ||
|
|
e662b2f393 | ||
|
|
00a5fdf491 | ||
|
|
63bc71da13 | ||
|
|
7fff9579c0 | ||
|
|
737beb11f6 | ||
|
|
f55af7da4c | ||
|
|
80461a78b0 | ||
|
|
40d194672b | ||
|
|
d63341ea06 | ||
|
|
df8c8dc93b | ||
|
|
dd3a140cb1 | ||
|
|
1b006599cf | ||
|
|
44aa3cc9b5 | ||
|
|
b88b24e231 | ||
|
|
890c31ba74 | ||
|
|
6dc9a11a89 | ||
|
|
ce2842d365 | ||
|
|
7d1096dbd8 | ||
|
|
95722802dc | ||
|
|
3047dae703 | ||
|
|
95cad7bdd9 | ||
|
|
4e22f13007 | ||
|
|
04611b0ae2 | ||
|
|
a00f1ab549 | ||
|
|
446b37793b | ||
|
|
b2b98643d8 | ||
|
|
b83eeeb131 | ||
|
|
e8d727c07a | ||
|
|
bb8109f67d | ||
|
|
e28fa26c43 | ||
|
|
639fc3793a | ||
|
|
2aaae5265a | ||
|
|
baa4c1fd25 | ||
|
|
479797361e | ||
|
|
0a9f1d2a27 | ||
|
|
5e103770fd | ||
|
|
e012a29b5e | ||
|
|
5d759f810c | ||
|
|
eb1f3a0ced | ||
|
|
29e8210782 | ||
|
|
45ca9405d3 | ||
|
|
e6f02bf8f7 | ||
|
|
57e75e3614 | ||
|
|
89ab67e067 | ||
|
|
e9d851e4d3 | ||
|
|
c675d0feee | ||
|
|
1859c0505e | ||
|
|
f15251096c | ||
|
|
115c599fd8 | ||
|
|
3121c08ee8 | ||
|
|
ef28b01286 | ||
|
|
a5bac39196 | ||
|
|
9f640b24b3 | ||
|
|
f48750c22c | ||
|
|
7a96e94491 | ||
|
|
22a32af750 | ||
|
|
dd423f2e7b | ||
|
|
12dec676db | ||
|
|
75e7556bfa | ||
|
|
504f1a8e97 | ||
|
|
e4a2af6ae7 | ||
|
|
fefa88fc2a | ||
|
|
ed8a7ee8a5 | ||
|
|
1771797453 | ||
|
|
46179f5c83 | ||
|
|
db6fc661a6 | ||
|
|
beb3a9f60a | ||
|
|
c088ab7a79 | ||
|
|
aab2b8fdbc | ||
|
|
b1e7a717af | ||
|
|
25e38bfc98 | ||
|
|
279c7324c4 | ||
|
|
1c90303914 | ||
|
|
6ab6502742 | ||
|
|
b79c029f21 | ||
|
|
020268fe67 | ||
|
|
176b1c9d20 | ||
|
|
5ab2efa0c0 | ||
|
|
88320488a7 | ||
|
|
2091abeea2 | ||
|
|
480f5c1a9a | ||
|
|
8e0db2705f | ||
|
|
1be9cdae67 | ||
|
|
e1a91a7e53 | ||
|
|
b952e3183f | ||
|
|
26ae0bf207 | ||
|
|
42cfd69463 | ||
|
|
7694b68e06 | ||
|
|
28e39c57bd | ||
|
|
2fa0a57d2b | ||
|
|
c9f3e1bdab | ||
|
|
2ba56b8c59 | ||
|
|
fb074c8c32 | ||
|
|
9fc082d1e6 | ||
|
|
dfda2f7d5d | ||
|
|
0c04802560 | ||
|
|
5146689158 | ||
|
|
e7fa94c3d3 | ||
|
|
a77ebd3b55 | ||
|
|
00114287e5 | ||
|
|
db0695126f | ||
|
|
eec5cf6b65 | ||
|
|
a9569d0ed9 | ||
|
|
88d9388be2 | ||
|
|
93c72ecea5 | ||
|
|
b5b0ac50bd | ||
|
|
4d2afdb1a9 | ||
|
|
39a177bd70 | ||
|
|
34fb6ac837 | ||
|
|
f868a454d9 | ||
|
|
24c6cd235b | ||
|
|
47855dc78b | ||
|
|
751ceab04e | ||
|
|
dbbc42c5fd | ||
|
|
27416efb6d | ||
|
|
21dd08544b | ||
|
|
ae88f7d181 | ||
|
|
9981ee7601 | ||
|
|
66b018a355 | ||
|
|
b6c48d0f98 | ||
|
|
097d77f7b3 | ||
|
|
ed1bc6c215 | ||
|
|
c552fdfc0f | ||
|
|
4006dba9f1 | ||
|
|
7a0586684b | ||
|
|
8f34d1c555 | ||
|
|
571db5c0ee | ||
|
|
9059855f2b | ||
|
|
e423678995 | ||
|
|
ece5577f26 | ||
|
|
f373abdd14 | ||
|
|
4defec194f | ||
|
|
5270a6781f | ||
|
|
fa93e195cb | ||
|
|
72898d897c | ||
|
|
c6ee65b654 | ||
|
|
4d7694de24 | ||
|
|
a083f25b6c | ||
|
|
befa9eb16d | ||
|
|
a278c630bb | ||
|
|
6a8d8babce | ||
|
|
76eb0f1775 | ||
|
|
0abe08f243 | ||
|
|
f692ebbbb9 | ||
|
|
c174b65465 | ||
|
|
015131f198 | ||
|
|
a730543c76 | ||
|
|
c704626a39 | ||
|
|
7ef25a3816 | ||
|
|
b43ad93c54 | ||
|
|
7850681ce1 | ||
|
|
846189b15b | ||
|
|
46a893a8b6 | ||
|
|
657aac0d68 | ||
|
|
30885cee01 | ||
|
|
9237984782 | ||
|
|
c289629a28 | ||
|
|
806196f572 | ||
|
|
0e598660b4 | ||
|
|
058bfe0737 | ||
|
|
81932c8cff | ||
|
|
bd7adafee0 | ||
|
|
faf0c2b816 | ||
|
|
419d4986f6 | ||
|
|
9f1a9a7d9c | ||
|
|
a3e7e7c6c9 | ||
|
|
94a5075b6d | ||
|
|
7c32404b69 | ||
|
|
d0c2dc53fe | ||
|
|
0e8530172c | ||
|
|
4427aeac54 | ||
|
|
93640bb08e | ||
|
|
512ed71fc3 | ||
|
|
0cfc43c444 | ||
|
|
ecd0edc29e | ||
|
|
6168a006f4 | ||
|
|
82ba5dad1b | ||
|
|
972ee8e42e | ||
|
|
7cd3f285ad | ||
|
|
89e327383e | ||
|
|
290a15bbd9 | ||
|
|
1dd21f1f76 | ||
|
|
46b3f83ce2 | ||
|
|
5c153c9e21 | ||
|
|
bca75a3ea4 | ||
|
|
0bc6f972b2 | ||
|
|
36cc9cc1ec | ||
|
|
20f6a5e797 | ||
|
|
ccbb68aa0c | ||
|
|
08003c59b6 | ||
|
|
dafa638558 | ||
|
|
75e5250509 | ||
|
|
0ed6eb7029 | ||
|
|
63e26b6050 | ||
|
|
949f1c648a | ||
|
|
3e7578d670 | ||
|
|
6f07ec2597 | ||
|
|
e65c0a0d1d | ||
|
|
be217b5354 | ||
|
|
bfe3029d31 | ||
|
|
6abdc39fe5 | ||
|
|
bf55367f4d | ||
|
|
9480758310 | ||
|
|
25b33fb031 | ||
|
|
10ede0d21c | ||
|
|
698bdd619f | ||
|
|
5cef6874f6 | ||
|
|
6d42ae2629 | ||
|
|
a3b94816f9 | ||
|
|
e0b47feb8b | ||
|
|
8aecec0b9a | ||
|
|
078bf41029 | ||
|
|
2754302fb7 | ||
|
|
dfb7658c3e | ||
|
|
a743785faf | ||
|
|
e4782dee68 | ||
|
|
64315df85f | ||
|
|
2a1fd16849 | ||
|
|
21e31d540e | ||
|
|
370c38ec76 | ||
|
|
854044229c | ||
|
|
69baa44a3a | ||
|
|
419e3f7f2b | ||
|
|
a9373d9779 | ||
|
|
1a0536d212 | ||
|
|
099b77cf9b | ||
|
|
c3d17bf847 | ||
|
|
e04b93a51a | ||
|
|
b36b62c68e | ||
|
|
ab465a755e | ||
|
|
c6f19db1ec | ||
|
|
019142efc9 | ||
|
|
a535fc17c3 | ||
|
|
0fbb18b315 | ||
|
|
3eb0093d2a | ||
|
|
d159dde2ca | ||
|
|
729a510c5b | ||
|
|
196561fed2 | ||
|
|
8f0bdcd172 | ||
|
|
fffc7f4098 | ||
|
|
c7a2e7ada1 | ||
|
|
95611e9c4b | ||
|
|
62fc6afd8a | ||
|
|
0f5cec0a60 | ||
|
|
d235ebaac9 | ||
|
|
6def083b4f | ||
|
|
87322744d4 | ||
|
|
f2a02b392e | ||
|
|
e6cedc257e | ||
|
|
1b5cf2d272 | ||
|
|
f76e822381 | ||
|
|
a2b1968d6e | ||
|
|
398eb13a7f | ||
|
|
956c8a8e03 | ||
|
|
6aba166c82 | ||
|
|
fd7c7ea6b7 | ||
|
|
d85e621bb3 | ||
|
|
822dd5e100 | ||
|
|
25801f374c | ||
|
|
8fd2d0b35c | ||
|
|
c16d8a1da1 | ||
|
|
ab1fdf69c8 | ||
|
|
dd196c0e11 | ||
|
|
0e506f5716 | ||
|
|
0a98ccff0c | ||
|
|
0c188f6d10 | ||
|
|
8009dd691b | ||
|
|
13d0e9914b | ||
|
|
9da49be44d | ||
|
|
00f7fa507b | ||
|
|
2c255b6dfe | ||
|
|
6e2cf8bb3f | ||
|
|
68ed1c80ce | ||
|
|
e0d23f4436 | ||
|
|
509f8a5353 | ||
|
|
b0c0cd7fda | ||
|
|
133dfd5063 | ||
|
|
e6abf4e33b | ||
|
|
07104b18f5 | ||
|
|
f39b85abf2 | ||
|
|
c6c97491ac | ||
|
|
355452cdb3 | ||
|
|
da3720c7a9 | ||
|
|
e92d4ff147 | ||
|
|
bb514d6216 | ||
|
|
3f380fa0da | ||
|
|
5aefb707fa | ||
|
|
4afd3c2322 | ||
|
|
b8eb8a90a5 | ||
|
|
4d6cb091cc | ||
|
|
fc8b1193de | ||
|
|
2c12af5af8 | ||
|
|
bd4d89fc21 | ||
|
|
9487529992 | ||
|
|
fa347fd49d | ||
|
|
8f7072d7e9 | ||
|
|
412c5d68cc | ||
|
|
e06b068033 | ||
|
|
6234391229 | ||
|
|
2568bfde5e | ||
|
|
fd7c2fbe93 | ||
|
|
206c185a3b | ||
|
|
7689cbbe0d | ||
|
|
c832b5d29e | ||
|
|
b57a9351b3 | ||
|
|
f0ae9e21ae | ||
|
|
9510c92288 | ||
|
|
755f3f05d8 | ||
|
|
5d8114b475 | ||
|
|
0ccbb52c1f | ||
|
|
85b39ecf99 | ||
|
|
230838c22b | ||
|
|
a7bfcdcb01 | ||
|
|
47ff630c55 | ||
|
|
70dc53bda7 | ||
|
|
0b8a142de0 | ||
|
|
7e1b433c17 | ||
|
|
800b0763e4 | ||
|
|
30aabe255b | ||
|
|
9b14d714ca | ||
|
|
8a38666105 | ||
|
|
ec878defab | ||
|
|
1786b70e14 | ||
|
|
7f525fa7dc | ||
|
|
e08d93b2aa | ||
|
|
df777c63fe | ||
|
|
3a5ee4a296 | ||
|
|
7b8a0114f5 | ||
|
|
003d110948 | ||
|
|
e9c9a67365 | ||
|
|
8b89e03999 | ||
|
|
9eff920989 | ||
|
|
711c82472c | ||
|
|
156bf02d21 | ||
|
|
932b53d92d | ||
|
|
2693b9a42d | ||
|
|
e9166c4a7d | ||
|
|
2bc64920dd | ||
|
|
aee5500833 | ||
|
|
6b336b7b2f | ||
|
|
f07992c091 | ||
|
|
3c0e77241d | ||
|
|
87461c7f72 | ||
|
|
a67f2b4976 | ||
|
|
8594781780 | ||
|
|
313e415ee9 | ||
|
|
c13d8f3699 | ||
|
|
e41f8f1d0f | ||
|
|
b2c8907635 | ||
|
|
05f4df1a30 | ||
|
|
35fe06a892 | ||
|
|
75ff541aec | ||
|
|
cd933ce6e4 | ||
|
|
0b93988450 | ||
|
|
056cab23e0 | ||
|
|
6bc8027644 | ||
|
|
3b9298ed2b | ||
|
|
12a323f691 | ||
|
|
9c4c211233 | ||
|
|
74ba68ff2c | ||
|
|
7273b37c16 | ||
|
|
0d4ebffc0e | ||
|
|
352b2fb4e7 | ||
|
|
6e6ef57303 | ||
|
|
cc1f14e5e9 | ||
|
|
1c419d5c65 | ||
|
|
71b83245b4 | ||
|
|
2b88555028 | ||
|
|
f021ad9b0a | ||
|
|
8884f64b4e | ||
|
|
dd790dceb5 | ||
|
|
b80e41503f | ||
|
|
8dfc5052e9 | ||
|
|
7f28fc17ca | ||
|
|
2c308ccd35 | ||
|
|
4d6dd44e10 | ||
|
|
b6992e32a5 | ||
|
|
ac080edb02 | ||
|
|
231859303d | ||
|
|
1acdd67fd9 | ||
|
|
bec63a9471 | ||
|
|
44e856e8dc | ||
|
|
3bab7678b7 | ||
|
|
61f68d9e1b | ||
|
|
94f1562ec5 | ||
|
|
46412acd13 | ||
|
|
e7426ea365 | ||
|
|
665eef68b9 | ||
|
|
7c63d4012f | ||
|
|
92be4e774e | ||
|
|
2395502e60 | ||
|
|
9f3902b48d | ||
|
|
6e76bcb77e | ||
|
|
e05a95dc2d | ||
|
|
86d61d698a | ||
|
|
8ce6535a7e | ||
|
|
65ca038eee | ||
|
|
f41f5ebebd | ||
|
|
9cf62f03fa | ||
|
|
f770d5072e | ||
|
|
5698b830ed | ||
|
|
bcc76dd60a | ||
|
|
70d4a0c022 | ||
|
|
8cfd994170 | ||
|
|
22d8d08355 | ||
|
|
641e829e3f | ||
|
|
f9edff8bf4 | ||
|
|
33e6be1ca6 | ||
|
|
e25c50a467 | ||
|
|
f8441ab42e | ||
|
|
4589d4b3f5 | ||
|
|
9cf720e040 | ||
|
|
cf793f7f49 | ||
|
|
2b3fddfe89 | ||
|
|
e148f143ea | ||
|
|
d202cb731d | ||
|
|
299d9998ad | ||
|
|
fba1484e2e | ||
|
|
4ab7300376 | ||
|
|
18cc5e0ee8 | ||
|
|
af0cda5dbf | ||
|
|
a730a3719b | ||
|
|
3b669193f6 | ||
|
|
c782bab296 | ||
|
|
b14646ebd9 | ||
|
|
7441de5fd9 | ||
|
|
f5360cb8d4 | ||
|
|
22cd2e3337 | ||
|
|
7e9d453a2c | ||
|
|
a4338b0d03 | ||
|
|
a35baca580 | ||
|
|
66b0108c51 | ||
|
|
2021431e2f | ||
|
|
ab836c6922 | ||
|
|
405b3be496 | ||
|
|
4a27128a1c | ||
|
|
c74bdc97ca | ||
|
|
ddd5e4c76d | ||
|
|
5e6a7e134f | ||
|
|
41bc519855 | ||
|
|
53d82618d9 | ||
|
|
57f548c6c0 | ||
|
|
8d83f64aba | ||
|
|
9162697117 | ||
|
|
47b19e3211 | ||
|
|
590f6d4c19 | ||
|
|
53108e816f | ||
|
|
3ac71e2f7f | ||
|
|
f4fadd366e | ||
|
|
cc38dab76f | ||
|
|
c8be701f0e | ||
|
|
417befb2be | ||
|
|
a0ce7f38e7 | ||
|
|
962e3d8e56 | ||
|
|
3a3df96996 | ||
|
|
2ffa632796 | ||
|
|
3c6c0b253d | ||
|
|
5f40fd6038 | ||
|
|
8e2dc8b3ee | ||
|
|
a02b531e47 | ||
|
|
a4cb2708cc | ||
|
|
973284607d | ||
|
|
28fd2f0314 | ||
|
|
9715873007 | ||
|
|
18a20407f6 | ||
|
|
1a396cfc7b | ||
|
|
e604c914d1 | ||
|
|
a310c160a5 | ||
|
|
45d50b12fd | ||
|
|
e87182264a | ||
|
|
a089d544a5 | ||
|
|
b6fe0be1b2 | ||
|
|
ba325b1581 | ||
|
|
1f47abf195 | ||
|
|
750f35bc36 | ||
|
|
c99d9d95c5 | ||
|
|
4d402b2600 | ||
|
|
64fb002168 | ||
|
|
1308b5bcf3 | ||
|
|
dc3dc4a1f0 | ||
|
|
99bb55af73 | ||
|
|
4a285225db | ||
|
|
d986bd2a6c | ||
|
|
8665342edf | ||
|
|
2e7c3bf789 | ||
|
|
31ea0fe3fe | ||
|
|
e0c9f8a5aa | ||
|
|
a17ec4221b | ||
|
|
328beaba35 | ||
|
|
efbbaa5741 | ||
|
|
14be2fa344 | ||
|
|
f3ccad192c | ||
|
|
5e580f9372 | ||
|
|
8410929e86 | ||
|
|
093a5d4ddf | ||
|
|
88028412bd | ||
|
|
11c93231aa | ||
|
|
5366b4c873 | ||
|
|
171e0ed312 | ||
|
|
a5b1b4e103 | ||
|
|
f50ddb436f | ||
|
|
0b4b091580 | ||
|
|
2f6d7ac128 | ||
|
|
6b990e1cee | ||
|
|
ddeed65994 | ||
|
|
d87748fda1 | ||
|
|
50f0ead113 | ||
|
|
4e3075aaba | ||
|
|
87d6684ca7 | ||
|
|
3bd7596873 | ||
|
|
39964bf077 | ||
|
|
089199e7c2 | ||
|
|
7b41b295b7 | ||
|
|
d7bc7a2d38 | ||
|
|
eae75c13bb | ||
|
|
fab13db4b4 | ||
|
|
69d5f521a5 | ||
|
|
c0a55142b5 | ||
|
|
513fb3428a | ||
|
|
9a0ae549f6 | ||
|
|
4410d7f195 | ||
|
|
92aa70182d | ||
|
|
90f5864f1e | ||
|
|
d44de670cd | ||
|
|
cb63025078 | ||
|
|
685e865b42 | ||
|
|
e47f126bd5 | ||
|
|
ea6f70e3c5 | ||
|
|
0469aab433 | ||
|
|
ad13b5eb4e | ||
|
|
7324a4973f | ||
|
|
8bc93d23b2 | ||
|
|
39de098461 | ||
|
|
531f232418 | ||
|
|
c708b685e1 | ||
|
|
65009e2f69 | ||
|
|
cbde91744f | ||
|
|
4c8a92bb0c | ||
|
|
5f047d22f4 | ||
|
|
efdc558cba | ||
|
|
04bd1cfa41 | ||
|
|
11a2e96d06 | ||
|
|
095c5e4f95 | ||
|
|
aa2a2e12cc | ||
|
|
8f231424d1 | ||
|
|
069db28fb6 | ||
|
|
2e747d3ece | ||
|
|
d03aadb367 | ||
|
|
749cde13c4 | ||
|
|
0b43aab855 | ||
|
|
147e24204b | ||
|
|
6580153f29 | ||
|
|
fbc94cfbfc | ||
|
|
e631b145b9 | ||
|
|
8cf0ae0994 | ||
|
|
a551bc5375 | ||
|
|
417053a6a2 | ||
|
|
a1495dd33d | ||
|
|
13c50e428f | ||
|
|
8403ccd3da | ||
|
|
c988bca958 | ||
|
|
e92bd61545 | ||
|
|
e84e8edb29 | ||
|
|
5f3db8e567 | ||
|
|
8215e0221a | ||
|
|
a4ef7205ca | ||
|
|
43ecd8b362 | ||
|
|
4b44d6fb83 | ||
|
|
ba8df96e41 | ||
|
|
722a30812f | ||
|
|
0e2fc07881 | ||
|
|
0ae3e83ce4 | ||
|
|
f4b573379d | ||
|
|
862ca375ee | ||
|
|
06bed20a2a | ||
|
|
5c578c0328 | ||
|
|
530de6741b | ||
|
|
5f7ff460fb | ||
|
|
3b3e1e37b9 | ||
|
|
5f40d9400c | ||
|
|
fcdc642acb | ||
|
|
46f594ab71 | ||
|
|
e8684cbb9d | ||
|
|
a36ab71600 | ||
|
|
35c1ff9014 | ||
|
|
e4ce05f94d | ||
|
|
9a9eb57676 | ||
|
|
86567e7fa5 | ||
|
|
38a624fecf | ||
|
|
fd96859883 | ||
|
|
b7b022cc7b | ||
|
|
94d22ed1aa | ||
|
|
3f4caed922 | ||
|
|
521014cd1f | ||
|
|
09303ab2fb | ||
|
|
df1ac8e1e2 | ||
|
|
7a55c91349 | ||
|
|
c491dfdd3a | ||
|
|
b5da076e2c | ||
|
|
18cd6c81a3 | ||
|
|
d9cc21f761 | ||
|
|
40b19c5e67 | ||
|
|
871f78b570 | ||
|
|
753fbc0c5c | ||
|
|
748277aa0e | ||
|
|
bf40a9ef6d | ||
|
|
733000eaa2 | ||
|
|
06207145af | ||
|
|
6a399a7250 | ||
|
|
7ba22f1a09 | ||
|
|
f54f950f81 | ||
|
|
4625711606 | ||
|
|
5735ea2b3c | ||
|
|
b597d0366a | ||
|
|
9c6dcc4a43 | ||
|
|
27c5464cb6 | ||
|
|
1dad7965d2 | ||
|
|
c14ca1d7fd | ||
|
|
2b9e7432b8 | ||
|
|
547747ff74 | ||
|
|
e5b137b331 | ||
|
|
9e554bdecd | ||
|
|
765b542264 | ||
|
|
182a095420 | ||
|
|
0865cffddf | ||
|
|
5a312b9900 | ||
|
|
af2b2f33c2 | ||
|
|
9aa08dfb9b | ||
|
|
b28c673133 | ||
|
|
9a545f176d | ||
|
|
65728eb6ab | ||
|
|
531e037974 | ||
|
|
a96467cb3e | ||
|
|
6e92a7d93d | ||
|
|
740e63da2b | ||
|
|
a69cae22dd | ||
|
|
8ea3c3c29e | ||
|
|
b195e3435f | ||
|
|
63ab739b3d | ||
|
|
34b4577c0b | ||
|
|
58bb788034 | ||
|
|
9e633b37e7 | ||
|
|
bb6a4842bd | ||
|
|
246727995d | ||
|
|
202695096a | ||
|
|
afbab293a8 | ||
|
|
78faf888af | ||
|
|
5164c21923 | ||
|
|
edcd1a3c5b | ||
|
|
532ab9128f | ||
|
|
a3072aacc2 | ||
|
|
8034e5bbcb | ||
|
|
df7a30bd14 | ||
|
|
27296d8880 | ||
|
|
8549b9bc37 | ||
|
|
7632373097 | ||
|
|
23b0674ac0 | ||
|
|
01f0484a0e | ||
|
|
3ca9035fdb | ||
|
|
caaf9d26db | ||
|
|
eb521b2332 | ||
|
|
68c29ab99e | ||
|
|
f12b7f4319 | ||
|
|
7db331320a | ||
|
|
97ad8a85c3 | ||
|
|
6f588196cb | ||
|
|
20241c27ee | ||
|
|
05d6aea37f | ||
|
|
7e0e7860cd | ||
|
|
a0afd7b8ed | ||
|
|
500369ab2b | ||
|
|
dc26d5c0c8 | ||
|
|
0def02f604 | ||
|
|
0ffa9167da | ||
|
|
a110e8f241 | ||
|
|
491f363392 | ||
|
|
33a67bf7b4 | ||
|
|
1e6f583431 | ||
|
|
5e3412d735 | ||
|
|
e6e4cd63f3 | ||
|
|
d9dfacaaf4 | ||
|
|
d43767b945 | ||
|
|
f5da5f4ef0 | ||
|
|
9a202cc124 | ||
|
|
c305deab52 | ||
|
|
0daaf3b1ec | ||
|
|
cb36754c46 | ||
|
|
8e21504bdb | ||
|
|
7e18aafe20 | ||
|
|
fcf1be52ac | ||
|
|
394bc9ceb8 | ||
|
|
e3786592b2 | ||
|
|
d6eaf8d3d9 | ||
|
|
b1c23336e3 | ||
|
|
44c5073dea | ||
|
|
b7593fac44 | ||
|
|
7a31d09356 | ||
|
|
af116794c4 | ||
|
|
88c85e1d8a | ||
|
|
f7b079b1b4 | ||
|
|
9322b3d07e | ||
|
|
72ffedead7 | ||
|
|
cf3a501562 | ||
|
|
7becdc3034 | ||
|
|
f0d599781d | ||
|
|
3386105048 | ||
|
|
3b8fb70db1 | ||
|
|
c3ae146580 | ||
|
|
0d079f0d89 | ||
|
|
55f5329817 | ||
|
|
79d92c30f8 | ||
|
|
73229501c2 | ||
|
|
32ca91a7c9 | ||
|
|
9f5a90ee9c | ||
|
|
a5307fd8cc | ||
|
|
180589144a | ||
|
|
d9c1867bd7 | ||
|
|
da37d649ec | ||
|
|
9e03ac084e | ||
|
|
082c51109d | ||
|
|
8f44c75dc3 | ||
|
|
4204b4af90 | ||
|
|
234f0d75e8 | ||
|
|
564186a1f9 | ||
|
|
ccdb477dbb | ||
|
|
5f92f9e965 | ||
|
|
c2db4390bb | ||
|
|
11c21b5259 | ||
|
|
3cd9e17e3f | ||
|
|
1982ce796f | ||
|
|
941650f668 | ||
|
|
9c0c6c1bd6 | ||
|
|
825e18a551 | ||
|
|
9ff0128fb1 | ||
|
|
bd0ddafcd0 | ||
|
|
36c3617204 | ||
|
|
90a9db3a91 | ||
|
|
59d6795d9e | ||
|
|
2c07cf50fa | ||
|
|
cc0e525dc5 | ||
|
|
73bd973109 | ||
|
|
19f5e92a74 | ||
|
|
a7e501d874 | ||
|
|
3202c38061 | ||
|
|
4676f0595c | ||
|
|
e35a8c942b | ||
|
|
31811eb91e | ||
|
|
b9316a4112 | ||
|
|
b7abd878ac | ||
|
|
38c2c47789 | ||
|
|
1d3d70e8d6 | ||
|
|
c03778ec8b | ||
|
|
29b0850a94 | ||
|
|
712fde46eb | ||
|
|
c2e79ca5a7 | ||
|
|
c3a52b3989 | ||
|
|
7213d82f1b | ||
|
|
5bcad69cf7 | ||
|
|
c9a487fa4d | ||
|
|
3804a46f3b | ||
|
|
52c0bb5302 | ||
|
|
8aa19e6420 | ||
|
|
4d1c7a3884 | ||
|
|
25f2c057b7 | ||
|
|
010be05920 | ||
|
|
4c465850a2 | ||
|
|
8313dfaeb9 | ||
|
|
873f2b2814 | ||
|
|
e53c90f8f0 | ||
|
|
9499ea8ca9 | ||
|
|
f6c09109ba | ||
|
|
273b5768c4 | ||
|
|
ee13cf7dd9 | ||
|
|
fecbae761e | ||
|
|
e0ee89bdd9 | ||
|
|
833c1f22a3 | ||
|
|
6fed6c8d30 | ||
|
|
94cdaf5314 | ||
|
|
f83ae27352 | ||
|
|
6badf047c3 | ||
|
|
47de9ad15f | ||
|
|
09b91cc663 | ||
|
|
ded16549f7 | ||
|
|
c89e47577b | ||
|
|
bb50beb7ab | ||
|
|
e4cd4d64d7 | ||
|
|
5675fc51a0 | ||
|
|
c7438c4aff | ||
|
|
4a6a3da36c | ||
|
|
a657c332b1 | ||
|
|
cc9cd3fc14 | ||
|
|
234258a077 | ||
|
|
13cda80ee6 | ||
|
|
f6e142baf5 | ||
|
|
ddf1f9bcd5 | ||
|
|
aa950669f6 | ||
|
|
dacd5d3e6b | ||
|
|
e76ccba2f7 | ||
|
|
3933819d53 | ||
|
|
99019c2b1f | ||
|
|
4bf5eb398b | ||
|
|
dbfbac62c0 | ||
|
|
7685293da4 | ||
|
|
ee9c328606 | ||
|
|
cb7790ccba | ||
|
|
6556fcc531 | ||
|
|
178391e7b2 | ||
|
|
18922a1c6d | ||
|
|
5e9e26fa67 | ||
|
|
f5430f9151 | ||
|
|
4dfdf2f92f | ||
|
|
e4d283cc99 | ||
|
|
8ee64d22b3 | ||
|
|
10e3e80042 | ||
|
|
f77a208e2c | ||
|
|
9366dbb96e | ||
|
|
550b17552b | ||
|
|
bec307d0e9 | ||
|
|
93c751f6eb | ||
|
|
bada88157e | ||
|
|
13f3137701 | ||
|
|
d3316ff6ff | ||
|
|
1b384e61b4 | ||
|
|
addea20cab | ||
|
|
fac23f2f57 | ||
|
|
bffe1ccb3d | ||
|
|
e577434fe6 | ||
|
|
5d1d9827e4 | ||
|
|
dd28ad20ef | ||
|
|
ef416ef60b | ||
|
|
95b3b55971 | ||
|
|
b3f32ae03e | ||
|
|
c7472174e5 | ||
|
|
2ad749354d | ||
|
|
4ed9d2ea22 | ||
|
|
280eb47de7 | ||
|
|
324a12b0ff | ||
|
|
a2543ccddc | ||
|
|
22666412c3 | ||
|
|
dd58044cdf | ||
|
|
10312d89d7 | ||
|
|
b4c0d877cb | ||
|
|
e95d56a5d0 | ||
|
|
90424e8329 | ||
|
|
1bfeb42a06 | ||
|
|
a936f92954 | ||
|
|
0bc514ec17 | ||
|
|
a2cf4001af | ||
|
|
cb4e12a68c | ||
|
|
a7f5124dfe | ||
|
|
ccbf71c5e7 | ||
|
|
04bf5f58d9 | ||
|
|
ab3f5956d4 | ||
|
|
c1fe8e583f | ||
|
|
fd166c4433 | ||
|
|
f29c7ba4f2 | ||
|
|
88869e9710 | ||
|
|
f8404ab043 | ||
|
|
9fa5d1ff9e | ||
|
|
483f353fd0 | ||
|
|
a11bf5b5c7 | ||
|
|
d4113ff753 | ||
|
|
1969f036fa | ||
|
|
8c90e01016 | ||
|
|
756c5c9b99 | ||
|
|
ee54b355af | ||
|
|
26cbbc0c56 | ||
|
|
f4f719d52a | ||
|
|
f2071d8b7e | ||
|
|
df88a55784 | ||
|
|
3ccbc626ff | ||
|
|
71a15cf222 | ||
|
|
26ddf769b1 | ||
|
|
3137387c0c | ||
|
|
fc142cfde8 | ||
|
|
b0503fa507 | ||
|
|
b86a97c9c0 | ||
|
|
eb6cd23772 | ||
|
|
efae1e7e6c | ||
|
|
19d55b840e | ||
|
|
cc0c1d05ab | ||
|
|
f088f65d5a | ||
|
|
5441b5a06b | ||
|
|
efc56c0a88 | ||
|
|
321fca2c0a | ||
|
|
bbd66e9cb0 | ||
|
|
eb0277146c | ||
|
|
10ee32ec48 | ||
|
|
bdb4be89ff | ||
|
|
61445e0b56 | ||
|
|
f15a010e0e | ||
|
|
58747004fe | ||
|
|
e7ff1eb66b | ||
|
|
4a00bd4797 | ||
|
|
2e6fc7e4a0 | ||
|
|
4a8f323be7 | ||
|
|
c7d82102ed | ||
|
|
068b861edc | ||
|
|
3c908c6a09 | ||
|
|
ba3805786c | ||
|
|
70afb197f1 | ||
|
|
d966e35054 | ||
|
|
1675570291 | ||
|
|
9b88de656e | ||
|
|
3d39b5653d | ||
|
|
eb5f7f64ad | ||
|
|
9fc0164c4d | ||
|
|
65eb520cca | ||
|
|
f7f07932b4 | ||
|
|
de52494039 | ||
|
|
4d87ee2bb6 | ||
|
|
d0ba0936ca | ||
|
|
b08556861f | ||
|
|
c96628ad49 | ||
|
|
a615882b3f | ||
|
|
2bcc8e0d30 | ||
|
|
de519edf78 | ||
|
|
caf47943c3 | ||
|
|
427ab12724 | ||
|
|
eba16c0cc3 | ||
|
|
a485de6359 | ||
|
|
1a985f7e82 | ||
|
|
7867411095 | ||
|
|
2f6ebd16c1 | ||
|
|
878b235614 | ||
|
|
75f9c6b0fb | ||
|
|
7c1e2bf96f | ||
|
|
181b44e117 | ||
|
|
f7793976fb | ||
|
|
8ffcd9b60a | ||
|
|
52d3c4d62d | ||
|
|
0fb3e75253 | ||
|
|
2c40e403c4 | ||
|
|
d1c519ed0d | ||
|
|
27470ef934 | ||
|
|
8a1da87702 | ||
|
|
c8d89f805b | ||
|
|
c9fceafc16 | ||
|
|
bbb9980941 | ||
|
|
da55d6f7cd | ||
|
|
eeacdc1359 | ||
|
|
ee1e92e1cb | ||
|
|
705802e584 | ||
|
|
b2e509f055 | ||
|
|
cca70764d4 | ||
|
|
3ac94710fb | ||
|
|
ca73a47785 | ||
|
|
1ef67fc8e9 | ||
|
|
8f3c2f4f3d | ||
|
|
e42b98ec17 | ||
|
|
efb318a979 | ||
|
|
3c0a82293c | ||
|
|
e867f31c31 | ||
|
|
aeb6da111b | ||
|
|
2736fa5202 | ||
|
|
4d3df867da | ||
|
|
62f78e4312 | ||
|
|
d223ac4675 | ||
|
|
c16404bb2d | ||
|
|
cf70933e21 | ||
|
|
46222e9352 | ||
|
|
212e94756b | ||
|
|
b42abbd4a2 | ||
|
|
730a55e721 | ||
|
|
06cf83b901 | ||
|
|
673e5af030 | ||
|
|
a0bc16c255 | ||
|
|
76b5234f7b | ||
|
|
928de47d1d | ||
|
|
274db6f606 | ||
|
|
89ca0ca927 | ||
|
|
8047008fa5 | ||
|
|
f914110626 | ||
|
|
5656fd0b96 | ||
|
|
c3d8c72302 | ||
|
|
1eefff9025 | ||
|
|
1dc7c7b0a4 | ||
|
|
011bac7b4f | ||
|
|
dc2d6e60d8 | ||
|
|
7809b6e50f | ||
|
|
f7f0370bf5 | ||
|
|
6300fc5364 | ||
|
|
16270cbd1a | ||
|
|
3b226dd2c0 | ||
|
|
4ac61d18ff | ||
|
|
fd7abdb8a4 | ||
|
|
92cd85b204 | ||
|
|
4bb7998208 | ||
|
|
91b22311af | ||
|
|
ddd00d4c25 | ||
|
|
428997f26a | ||
|
|
c9d35d8096 | ||
|
|
761b3bd591 | ||
|
|
a440e6f115 | ||
|
|
837b1a9a73 | ||
|
|
bed37184d1 | ||
|
|
785ed480bb | ||
|
|
d8c39c42a1 | ||
|
|
4b06138d35 | ||
|
|
bd5668d15d | ||
|
|
1d6c61cc5b | ||
|
|
ed22e53cb6 | ||
|
|
d18a34785c | ||
|
|
79fb8de7b7 | ||
|
|
07f5f3f1bb | ||
|
|
8fffa40502 | ||
|
|
6680b32579 | ||
|
|
af618f42bd | ||
|
|
aafcce871e | ||
|
|
71d1418559 | ||
|
|
e0678cc869 | ||
|
|
74ddf7114c | ||
|
|
837d4c1597 | ||
|
|
ccb85737f7 | ||
|
|
f9a4699e84 | ||
|
|
bab3aea8ff | ||
|
|
c52cf1fc3f | ||
|
|
3fe43a5b57 | ||
|
|
1a8b6d2fe7 | ||
|
|
570a4b7915 | ||
|
|
63859b81ad | ||
|
|
d8d13f8bf6 | ||
|
|
c3ce44e202 | ||
|
|
3372cdc0df | ||
|
|
82fc945d73 | ||
|
|
040bd52705 | ||
|
|
415cfcb72f | ||
|
|
2b0efb32c1 | ||
|
|
a3a4fdd7fc | ||
|
|
78f6bbf7fe | ||
|
|
43606d26e4 | ||
|
|
b77c409257 | ||
|
|
96f77a6275 | ||
|
|
2336e36314 | ||
|
|
9146c31abf | ||
|
|
bd4c431eb4 | ||
|
|
b620e5319a | ||
|
|
f12df8ded4 | ||
|
|
0ecd920ad9 | ||
|
|
b40be8c494 | ||
|
|
f7c5e64fbc | ||
|
|
6eea2526f6 | ||
|
|
be9db47276 | ||
|
|
35cb81518c | ||
|
|
4042b8f026 | ||
|
|
a3d1b2d671 | ||
|
|
eec8c41e20 | ||
|
|
4f9fe7245b | ||
|
|
6e1ae69691 | ||
|
|
65a1fcfda5 | ||
|
|
373e11495d | ||
|
|
8b6eac3c1c | ||
|
|
43bae7fb01 | ||
|
|
18ee1e2685 | ||
|
|
5b91b5f436 | ||
|
|
54749dfd1e | ||
|
|
f86212dfe1 | ||
|
|
9ed2e2b0ca | ||
|
|
a29cd622c3 | ||
|
|
6cea0139d1 | ||
|
|
45a6a930c9 | ||
|
|
22b273b145 | ||
|
|
ca71c88744 | ||
|
|
20b93e9fba | ||
|
|
05b29a7e9a | ||
|
|
913ef5c817 | ||
|
|
60534597e0 | ||
|
|
a7173b6bc9 | ||
|
|
6deb51428a | ||
|
|
2f00a642be | ||
|
|
4e47960440 | ||
|
|
67b54ac1eb | ||
|
|
0e82b6981f | ||
|
|
d6bf52c11f | ||
|
|
c1ac66f6e5 | ||
|
|
b9e4a66fdc | ||
|
|
9c363be16f | ||
|
|
affab384cf | ||
|
|
0fc546962e | ||
|
|
d215d96b9b | ||
|
|
327e873ef6 | ||
|
|
a2f65de1ce | ||
|
|
bc23129759 | ||
|
|
3e7b184ab4 | ||
|
|
fe0b0d1157 | ||
|
|
55b1c021ec | ||
|
|
21cf4cd2ce | ||
|
|
defc98ab0e | ||
|
|
74af03408f | ||
|
|
1d151d8fa6 | ||
|
|
e5aeced045 | ||
|
|
17d39143ac | ||
|
|
26c37ba824 | ||
|
|
d380cc31fa | ||
|
|
aa2fedee9d | ||
|
|
14fa0e478a | ||
|
|
ac878d46a5 | ||
|
|
6da0a473be | ||
|
|
2642ec85e5 | ||
|
|
26d2152a36 | ||
|
|
1cfd404321 | ||
|
|
207020b7a0 | ||
|
|
6ad9a5952e | ||
|
|
0511680fc5 | ||
|
|
ad14503e9f | ||
|
|
9221f25e35 | ||
|
|
95eec90a62 | ||
|
|
927cb51b5d | ||
|
|
9f4025fdfb | ||
|
|
b57336f6cf | ||
|
|
6e1c2fd7fd | ||
|
|
50e3b7cd5a | ||
|
|
8beda5b0ae | ||
|
|
9998ed177b | ||
|
|
e2db3d84d8 | ||
|
|
141a390105 | ||
|
|
78ad5d5879 | ||
|
|
2ddd38796d | ||
|
|
35b220d7a5 | ||
|
|
8093faee19 | ||
|
|
10a7bd2eff | ||
|
|
2f8a25ae26 | ||
|
|
19bf80dfaf | ||
|
|
fbfaac9859 |
5
.claude/settings.json
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
{
|
||||||
|
"attribution": {
|
||||||
|
"commit": ""
|
||||||
|
}
|
||||||
|
}
|
||||||
34
.github/ISSUE_TEMPLATE/bug-report.yml
vendored
@@ -1,6 +1,6 @@
|
|||||||
name: 🐛 Bug Report
|
name: 🐛 Bug Report
|
||||||
description: Create a report to help us improve embassyOS
|
description: Create a report to help us improve StartOS
|
||||||
title: '[bug]: '
|
title: "[bug]: "
|
||||||
labels: [Bug, Needs Triage]
|
labels: [Bug, Needs Triage]
|
||||||
assignees:
|
assignees:
|
||||||
- MattDHill
|
- MattDHill
|
||||||
@@ -10,27 +10,25 @@ body:
|
|||||||
label: Prerequisites
|
label: Prerequisites
|
||||||
description: Please confirm you have completed the following.
|
description: Please confirm you have completed the following.
|
||||||
options:
|
options:
|
||||||
- label: I have searched for [existing issues](https://github.com/start9labs/embassy-os/issues) that already report this problem.
|
- label: I have searched for [existing issues](https://github.com/start9labs/start-os/issues) that already report this problem.
|
||||||
required: true
|
required: true
|
||||||
- type: input
|
- type: input
|
||||||
attributes:
|
attributes:
|
||||||
label: embassyOS Version
|
label: Server Hardware
|
||||||
description: What version of embassyOS are you running?
|
description: On what hardware are you running StartOS? Please be as detailed as possible!
|
||||||
placeholder: e.g. 0.3.0
|
placeholder: Pi (8GB) w/ 32GB microSD & Samsung T7 SSD
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
- type: input
|
||||||
|
attributes:
|
||||||
|
label: StartOS Version
|
||||||
|
description: What version of StartOS are you running?
|
||||||
|
placeholder: e.g. 0.3.4.3
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
- type: dropdown
|
- type: dropdown
|
||||||
attributes:
|
attributes:
|
||||||
label: Device
|
label: Client OS
|
||||||
description: What device are you using to connect to Embassy?
|
|
||||||
options:
|
|
||||||
- Phone/tablet
|
|
||||||
- Laptop/Desktop
|
|
||||||
validations:
|
|
||||||
required: true
|
|
||||||
- type: dropdown
|
|
||||||
attributes:
|
|
||||||
label: Device OS
|
|
||||||
description: What operating system is your device running?
|
description: What operating system is your device running?
|
||||||
options:
|
options:
|
||||||
- MacOS
|
- MacOS
|
||||||
@@ -45,14 +43,14 @@ body:
|
|||||||
required: true
|
required: true
|
||||||
- type: input
|
- type: input
|
||||||
attributes:
|
attributes:
|
||||||
label: Device OS Version
|
label: Client OS Version
|
||||||
description: What version is your device OS?
|
description: What version is your device OS?
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
- type: dropdown
|
- type: dropdown
|
||||||
attributes:
|
attributes:
|
||||||
label: Browser
|
label: Browser
|
||||||
description: What browser are you using to connect to Embassy?
|
description: What browser are you using to connect to your server?
|
||||||
options:
|
options:
|
||||||
- Firefox
|
- Firefox
|
||||||
- Brave
|
- Brave
|
||||||
|
|||||||
8
.github/ISSUE_TEMPLATE/feature-request.yml
vendored
@@ -1,6 +1,6 @@
|
|||||||
name: 💡 Feature Request
|
name: 💡 Feature Request
|
||||||
description: Suggest an idea for embassyOS
|
description: Suggest an idea for StartOS
|
||||||
title: '[feat]: '
|
title: "[feat]: "
|
||||||
labels: [Enhancement]
|
labels: [Enhancement]
|
||||||
assignees:
|
assignees:
|
||||||
- MattDHill
|
- MattDHill
|
||||||
@@ -10,7 +10,7 @@ body:
|
|||||||
label: Prerequisites
|
label: Prerequisites
|
||||||
description: Please confirm you have completed the following.
|
description: Please confirm you have completed the following.
|
||||||
options:
|
options:
|
||||||
- label: I have searched for [existing issues](https://github.com/start9labs/embassy-os/issues) that already suggest this feature.
|
- label: I have searched for [existing issues](https://github.com/start9labs/start-os/issues) that already suggest this feature.
|
||||||
required: true
|
required: true
|
||||||
- type: textarea
|
- type: textarea
|
||||||
attributes:
|
attributes:
|
||||||
@@ -27,7 +27,7 @@ body:
|
|||||||
- type: textarea
|
- type: textarea
|
||||||
attributes:
|
attributes:
|
||||||
label: Describe Preferred Solution
|
label: Describe Preferred Solution
|
||||||
description: How you want this feature added to embassyOS?
|
description: How you want this feature added to StartOS?
|
||||||
- type: textarea
|
- type: textarea
|
||||||
attributes:
|
attributes:
|
||||||
label: Describe Alternatives
|
label: Describe Alternatives
|
||||||
|
|||||||
81
.github/actions/setup-build/action.yml
vendored
Normal file
@@ -0,0 +1,81 @@
|
|||||||
|
name: Setup Build Environment
|
||||||
|
description: Common build environment setup steps
|
||||||
|
|
||||||
|
inputs:
|
||||||
|
nodejs-version:
|
||||||
|
description: Node.js version
|
||||||
|
required: true
|
||||||
|
setup-python:
|
||||||
|
description: Set up Python
|
||||||
|
required: false
|
||||||
|
default: "false"
|
||||||
|
setup-docker:
|
||||||
|
description: Set up Docker QEMU and Buildx
|
||||||
|
required: false
|
||||||
|
default: "true"
|
||||||
|
setup-sccache:
|
||||||
|
description: Configure sccache for GitHub Actions
|
||||||
|
required: false
|
||||||
|
default: "true"
|
||||||
|
free-space:
|
||||||
|
description: Remove unnecessary packages to free disk space
|
||||||
|
required: false
|
||||||
|
default: "true"
|
||||||
|
|
||||||
|
runs:
|
||||||
|
using: composite
|
||||||
|
steps:
|
||||||
|
- name: Free disk space
|
||||||
|
if: inputs.free-space == 'true'
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
sudo apt-get remove --purge -y azure-cli || true
|
||||||
|
sudo apt-get remove --purge -y firefox || true
|
||||||
|
sudo apt-get remove --purge -y ghc-* || true
|
||||||
|
sudo apt-get remove --purge -y google-cloud-sdk || true
|
||||||
|
sudo apt-get remove --purge -y google-chrome-stable || true
|
||||||
|
sudo apt-get remove --purge -y powershell || true
|
||||||
|
sudo apt-get remove --purge -y php* || true
|
||||||
|
sudo apt-get remove --purge -y ruby* || true
|
||||||
|
sudo apt-get remove --purge -y mono-* || true
|
||||||
|
sudo apt-get autoremove -y
|
||||||
|
sudo apt-get clean
|
||||||
|
sudo rm -rf /usr/lib/jvm
|
||||||
|
sudo rm -rf /usr/local/.ghcup
|
||||||
|
sudo rm -rf /usr/local/lib/android
|
||||||
|
sudo rm -rf /usr/share/dotnet
|
||||||
|
sudo rm -rf /usr/share/swift
|
||||||
|
sudo rm -rf "$AGENT_TOOLSDIRECTORY"
|
||||||
|
|
||||||
|
# BuildJet runners lack /opt/hostedtoolcache, which setup-python and setup-qemu expect
|
||||||
|
- name: Ensure hostedtoolcache exists
|
||||||
|
shell: bash
|
||||||
|
run: sudo mkdir -p /opt/hostedtoolcache && sudo chown $USER:$USER /opt/hostedtoolcache
|
||||||
|
|
||||||
|
- name: Set up Python
|
||||||
|
if: inputs.setup-python == 'true'
|
||||||
|
uses: actions/setup-python@v5
|
||||||
|
with:
|
||||||
|
python-version: "3.x"
|
||||||
|
|
||||||
|
- uses: actions/setup-node@v4
|
||||||
|
with:
|
||||||
|
node-version: ${{ inputs.nodejs-version }}
|
||||||
|
cache: npm
|
||||||
|
cache-dependency-path: "**/package-lock.json"
|
||||||
|
|
||||||
|
- name: Set up Docker QEMU
|
||||||
|
if: inputs.setup-docker == 'true'
|
||||||
|
uses: docker/setup-qemu-action@v3
|
||||||
|
|
||||||
|
- name: Set up Docker Buildx
|
||||||
|
if: inputs.setup-docker == 'true'
|
||||||
|
uses: docker/setup-buildx-action@v3
|
||||||
|
|
||||||
|
- name: Configure sccache
|
||||||
|
if: inputs.setup-sccache == 'true'
|
||||||
|
uses: actions/github-script@v7
|
||||||
|
with:
|
||||||
|
script: |
|
||||||
|
core.exportVariable('ACTIONS_RESULTS_URL', process.env.ACTIONS_RESULTS_URL || '');
|
||||||
|
core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || '');
|
||||||
29
.github/workflows/README.md
vendored
@@ -1,29 +0,0 @@
|
|||||||
# This folder contains GitHub Actions workflows for building the project
|
|
||||||
|
|
||||||
## backend
|
|
||||||
Runs: manually (on: workflow_dispatch) or called by product-pipeline (on: workflow_call)
|
|
||||||
|
|
||||||
This workflow uses the actions and docker/setup-buildx-action@v1 to prepare the environment for aarch64 cross complilation using docker buildx.
|
|
||||||
When execution of aarch64 containers is required the action docker/setup-qemu-action@v1 is added.
|
|
||||||
A matrix-strategy has been used to build for both x86_64 and aarch64 platforms in parallel.
|
|
||||||
|
|
||||||
### Running unittests
|
|
||||||
|
|
||||||
Unittests are run using [cargo-nextest]( https://nexte.st/). First the sources are (cross-)compiled and archived. The archive is then run on the correct platform.
|
|
||||||
|
|
||||||
## frontend
|
|
||||||
Runs: manually (on: workflow_dispatch) or called by product-pipeline (on: workflow_call)
|
|
||||||
|
|
||||||
This workflow builds the frontends.
|
|
||||||
|
|
||||||
## product
|
|
||||||
Runs: when a pull request targets the master or next branch and when a change to the master or next branch is made
|
|
||||||
|
|
||||||
This workflow builds everything, re-using the backend and frontend workflows.
|
|
||||||
The download and extraction order of artifacts is relevant to `make`, as it checks the file timestamps to decide which targets need to be executed.
|
|
||||||
|
|
||||||
Result: eos.img
|
|
||||||
|
|
||||||
## a note on uploading artifacts
|
|
||||||
|
|
||||||
Artifacts are used to share data between jobs. File permissions are not maintained during artifact upload. Where file permissions are relevant, the workaround using tar has been used. See (here)[https://github.com/actions/upload-artifact#maintaining-file-permissions-and-case-sensitive-files].
|
|
||||||
240
.github/workflows/backend.yaml
vendored
@@ -1,240 +0,0 @@
|
|||||||
name: Backend
|
|
||||||
|
|
||||||
on:
|
|
||||||
workflow_call:
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
env:
|
|
||||||
RUST_VERSION: "1.62.1"
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
build_libs:
|
|
||||||
name: Build libs
|
|
||||||
strategy:
|
|
||||||
fail-fast: false
|
|
||||||
matrix:
|
|
||||||
target: [x86_64, aarch64]
|
|
||||||
include:
|
|
||||||
- target: x86_64
|
|
||||||
snapshot_command: ./build-v8-snapshot.sh
|
|
||||||
artifact_name: js_snapshot
|
|
||||||
artifact_path: libs/js_engine/src/artifacts/JS_SNAPSHOT.bin
|
|
||||||
- target: aarch64
|
|
||||||
snapshot_command: ./build-arm-v8-snapshot.sh
|
|
||||||
artifact_name: arm_js_snapshot
|
|
||||||
artifact_path: libs/js_engine/src/artifacts/ARM_JS_SNAPSHOT.bin
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
timeout-minutes: 60
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
submodules: recursive
|
|
||||||
|
|
||||||
- name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v1
|
|
||||||
if: ${{ matrix.target == 'aarch64' }}
|
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v1
|
|
||||||
if: ${{ matrix.target == 'aarch64' }}
|
|
||||||
|
|
||||||
- uses: actions-rs/toolchain@v1
|
|
||||||
with:
|
|
||||||
toolchain: ${{ env.RUST_VERSION }}
|
|
||||||
override: true
|
|
||||||
if: ${{ matrix.target == 'x86_64' }}
|
|
||||||
|
|
||||||
- uses: actions/cache@v3
|
|
||||||
with:
|
|
||||||
path: |
|
|
||||||
~/.cargo/bin/
|
|
||||||
~/.cargo/registry/index/
|
|
||||||
~/.cargo/registry/cache/
|
|
||||||
~/.cargo/git/db/
|
|
||||||
libs/target/
|
|
||||||
key: ${{ runner.os }}-cargo-libs-${{ matrix.target }}-${{ hashFiles('libs/Cargo.lock') }}
|
|
||||||
|
|
||||||
- name: Build v8 snapshot
|
|
||||||
run: ${{ matrix.snapshot_command }}
|
|
||||||
working-directory: libs
|
|
||||||
|
|
||||||
- uses: actions/upload-artifact@v3
|
|
||||||
with:
|
|
||||||
name: ${{ matrix.artifact_name }}
|
|
||||||
path: ${{ matrix.artifact_path }}
|
|
||||||
|
|
||||||
build_backend:
|
|
||||||
name: Build backend
|
|
||||||
strategy:
|
|
||||||
fail-fast: false
|
|
||||||
matrix:
|
|
||||||
target: [x86_64, aarch64]
|
|
||||||
include:
|
|
||||||
- target: x86_64
|
|
||||||
snapshot_download: js_snapshot
|
|
||||||
- target: aarch64
|
|
||||||
snapshot_download: arm_js_snapshot
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
timeout-minutes: 120
|
|
||||||
needs: build_libs
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
submodules: recursive
|
|
||||||
|
|
||||||
- name: Download ${{ matrix.snapshot_download }} artifact
|
|
||||||
uses: actions/download-artifact@v3
|
|
||||||
with:
|
|
||||||
name: ${{ matrix.snapshot_download }}
|
|
||||||
path: libs/js_engine/src/artifacts/
|
|
||||||
|
|
||||||
- uses: actions-rs/toolchain@v1
|
|
||||||
with:
|
|
||||||
toolchain: ${{ env.RUST_VERSION }}
|
|
||||||
override: true
|
|
||||||
if: ${{ matrix.target == 'x86_64' }}
|
|
||||||
|
|
||||||
- uses: actions/cache@v3
|
|
||||||
with:
|
|
||||||
path: |
|
|
||||||
~/.cargo/bin/
|
|
||||||
~/.cargo/registry/index/
|
|
||||||
~/.cargo/registry/cache/
|
|
||||||
~/.cargo/git/db/
|
|
||||||
backend/target/
|
|
||||||
key: ${{ runner.os }}-cargo-backend-${{ matrix.target }}-${{ hashFiles('backend/Cargo.lock') }}
|
|
||||||
|
|
||||||
- name: Install dependencies
|
|
||||||
run: sudo apt-get install libavahi-client-dev
|
|
||||||
if: ${{ matrix.target == 'x86_64' }}
|
|
||||||
|
|
||||||
- name: Check Git Hash
|
|
||||||
run: ./check-git-hash.sh
|
|
||||||
|
|
||||||
- name: Check Environment
|
|
||||||
run: ./check-environment.sh
|
|
||||||
|
|
||||||
- name: Build backend
|
|
||||||
run: cargo build --release --target x86_64-unknown-linux-gnu --locked
|
|
||||||
working-directory: backend
|
|
||||||
if: ${{ matrix.target == 'x86_64' }}
|
|
||||||
|
|
||||||
- name: Build backend
|
|
||||||
run: |
|
|
||||||
docker run --rm \
|
|
||||||
-v "/home/runner/.cargo/registry":/root/.cargo/registry \
|
|
||||||
-v "$(pwd)":/home/rust/src \
|
|
||||||
-P start9/rust-arm-cross:aarch64 \
|
|
||||||
sh -c 'cd /home/rust/src/backend &&
|
|
||||||
rustup install ${{ env.RUST_VERSION }} &&
|
|
||||||
rustup override set ${{ env.RUST_VERSION }} &&
|
|
||||||
rustup target add aarch64-unknown-linux-gnu &&
|
|
||||||
cargo build --release --target ${{ matrix.target }}-unknown-linux-gnu --locked'
|
|
||||||
if: ${{ matrix.target == 'aarch64' }}
|
|
||||||
|
|
||||||
- name: 'Tar files to preserve file permissions'
|
|
||||||
run: make ARCH=${{ matrix.target }} backend-${{ matrix.target }}.tar
|
|
||||||
|
|
||||||
- uses: actions/upload-artifact@v3
|
|
||||||
with:
|
|
||||||
name: backend-${{ matrix.target }}
|
|
||||||
path: backend-${{ matrix.target }}.tar
|
|
||||||
|
|
||||||
- name: Install nextest
|
|
||||||
uses: taiki-e/install-action@nextest
|
|
||||||
|
|
||||||
- name: Build and archive tests
|
|
||||||
run: cargo nextest archive --archive-file nextest-archive-${{ matrix.target }}.tar.zst --target ${{ matrix.target }}-unknown-linux-gnu
|
|
||||||
working-directory: backend
|
|
||||||
if: ${{ matrix.target == 'x86_64' }}
|
|
||||||
|
|
||||||
- name: Build and archive tests
|
|
||||||
run: |
|
|
||||||
docker run --rm \
|
|
||||||
-v "$HOME/.cargo/registry":/root/.cargo/registry \
|
|
||||||
-v "$(pwd)":/home/rust/src \
|
|
||||||
-P start9/rust-arm-cross:aarch64 \
|
|
||||||
sh -c 'cd /home/rust/src/backend &&
|
|
||||||
rustup install ${{ env.RUST_VERSION }} &&
|
|
||||||
rustup override set ${{ env.RUST_VERSION }} &&
|
|
||||||
rustup target add aarch64-unknown-linux-gnu &&
|
|
||||||
curl -LsSf https://get.nexte.st/latest/linux | tar zxf - -C ${CARGO_HOME:-~/.cargo}/bin &&
|
|
||||||
cargo nextest archive --archive-file nextest-archive-${{ matrix.target }}.tar.zst --target ${{ matrix.target }}-unknown-linux-gnu'
|
|
||||||
if: ${{ matrix.target == 'aarch64' }}
|
|
||||||
|
|
||||||
- name: Reset permissions
|
|
||||||
run: sudo chown -R $USER target
|
|
||||||
working-directory: backend
|
|
||||||
if: ${{ matrix.target == 'aarch64' }}
|
|
||||||
|
|
||||||
- name: Upload archive to workflow
|
|
||||||
uses: actions/upload-artifact@v3
|
|
||||||
with:
|
|
||||||
name: nextest-archive-${{ matrix.target }}
|
|
||||||
path: backend/nextest-archive-${{ matrix.target }}.tar.zst
|
|
||||||
|
|
||||||
run_tests_backend:
|
|
||||||
name: Test backend
|
|
||||||
strategy:
|
|
||||||
fail-fast: false
|
|
||||||
matrix:
|
|
||||||
target: [x86_64, aarch64]
|
|
||||||
include:
|
|
||||||
- target: x86_64
|
|
||||||
- target: aarch64
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
timeout-minutes: 60
|
|
||||||
needs: build_backend
|
|
||||||
env:
|
|
||||||
CARGO_TERM_COLOR: always
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
submodules: recursive
|
|
||||||
|
|
||||||
- name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v1
|
|
||||||
if: ${{ matrix.target == 'aarch64' }}
|
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v1
|
|
||||||
if: ${{ matrix.target == 'aarch64' }}
|
|
||||||
|
|
||||||
- run: mkdir -p ~/.cargo/bin
|
|
||||||
if: ${{ matrix.target == 'x86_64' }}
|
|
||||||
|
|
||||||
- name: Install nextest
|
|
||||||
uses: taiki-e/install-action@nextest
|
|
||||||
if: ${{ matrix.target == 'x86_64' }}
|
|
||||||
|
|
||||||
- name: Download archive
|
|
||||||
uses: actions/download-artifact@v3
|
|
||||||
with:
|
|
||||||
name: nextest-archive-${{ matrix.target }}
|
|
||||||
|
|
||||||
- name: Download nextest (aarch64)
|
|
||||||
run: wget -O nextest-aarch64.tar.gz https://get.nexte.st/latest/linux-arm
|
|
||||||
if: ${{ matrix.target == 'aarch64' }}
|
|
||||||
|
|
||||||
- name: Run tests
|
|
||||||
run: |
|
|
||||||
${CARGO_HOME:-~/.cargo}/bin/cargo-nextest nextest run --no-fail-fast --archive-file nextest-archive-${{ matrix.target }}.tar.zst \
|
|
||||||
--filter-expr 'not (test(system::test_get_temp) | test(net::tor::test) | test(system::test_get_disk_usage) | test(net::ssl::certificate_details_persist) | test(net::ssl::ca_details_persist))'
|
|
||||||
if: ${{ matrix.target == 'x86_64' }}
|
|
||||||
|
|
||||||
- name: Run tests
|
|
||||||
run: |
|
|
||||||
docker run --rm --platform linux/arm64/v8 \
|
|
||||||
-v "/home/runner/.cargo/registry":/usr/local/cargo/registry \
|
|
||||||
-v "$(pwd)":/home/rust/src \
|
|
||||||
-e CARGO_TERM_COLOR=${{ env.CARGO_TERM_COLOR }} \
|
|
||||||
-P ubuntu:20.04 \
|
|
||||||
sh -c '
|
|
||||||
apt update &&
|
|
||||||
apt install -y ca-certificates &&
|
|
||||||
cd /home/rust/src &&
|
|
||||||
mkdir -p ~/.cargo/bin &&
|
|
||||||
tar -zxvf nextest-aarch64.tar.gz -C ${CARGO_HOME:-~/.cargo}/bin &&
|
|
||||||
${CARGO_HOME:-~/.cargo}/bin/cargo-nextest nextest run --archive-file nextest-archive-${{ matrix.target }}.tar.zst \
|
|
||||||
--filter-expr "not (test(system::test_get_temp) | test(net::tor::test) | test(system::test_get_disk_usage) | test(net::ssl::certificate_details_persist) | test(net::ssl::ca_details_persist))"'
|
|
||||||
if: ${{ matrix.target == 'aarch64' }}
|
|
||||||
45
.github/workflows/frontend.yaml
vendored
@@ -1,45 +0,0 @@
|
|||||||
name: Frontend
|
|
||||||
|
|
||||||
on:
|
|
||||||
workflow_call:
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
env:
|
|
||||||
NODEJS_VERSION: '16'
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
frontend:
|
|
||||||
name: Build frontend
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
timeout-minutes: 60
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
submodules: recursive
|
|
||||||
|
|
||||||
- uses: actions/setup-node@v3
|
|
||||||
with:
|
|
||||||
node-version: ${{ env.NODEJS_VERSION }}
|
|
||||||
|
|
||||||
- name: Get npm cache directory
|
|
||||||
id: npm-cache-dir
|
|
||||||
run: |
|
|
||||||
echo "::set-output name=dir::$(npm config get cache)"
|
|
||||||
- uses: actions/cache@v3
|
|
||||||
id: npm-cache
|
|
||||||
with:
|
|
||||||
path: ${{ steps.npm-cache-dir.outputs.dir }}
|
|
||||||
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
|
|
||||||
restore-keys: |
|
|
||||||
${{ runner.os }}-node-
|
|
||||||
|
|
||||||
- name: Build frontends
|
|
||||||
run: make frontends
|
|
||||||
|
|
||||||
- name: 'Tar files to preserve file permissions'
|
|
||||||
run: tar -cvf frontend.tar ENVIRONMENT.txt GIT_HASH.txt frontend/dist frontend/config.json
|
|
||||||
|
|
||||||
- uses: actions/upload-artifact@v3
|
|
||||||
with:
|
|
||||||
name: frontend
|
|
||||||
path: frontend.tar
|
|
||||||
137
.github/workflows/product.yaml
vendored
@@ -1,137 +0,0 @@
|
|||||||
name: Build Pipeline
|
|
||||||
|
|
||||||
on:
|
|
||||||
workflow_dispatch:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- master
|
|
||||||
- next
|
|
||||||
pull_request:
|
|
||||||
branches:
|
|
||||||
- master
|
|
||||||
- next
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
compat:
|
|
||||||
uses: ./.github/workflows/reusable-workflow.yaml
|
|
||||||
with:
|
|
||||||
build_command: make system-images/compat/compat.tar
|
|
||||||
artifact_name: compat.tar
|
|
||||||
artifact_path: system-images/compat/compat.tar
|
|
||||||
|
|
||||||
utils:
|
|
||||||
uses: ./.github/workflows/reusable-workflow.yaml
|
|
||||||
with:
|
|
||||||
build_command: make system-images/utils/utils.tar
|
|
||||||
artifact_name: utils.tar
|
|
||||||
artifact_path: system-images/utils/utils.tar
|
|
||||||
|
|
||||||
binfmt:
|
|
||||||
uses: ./.github/workflows/reusable-workflow.yaml
|
|
||||||
with:
|
|
||||||
build_command: make system-images/binfmt/binfmt.tar
|
|
||||||
artifact_name: binfmt.tar
|
|
||||||
artifact_path: system-images/binfmt/binfmt.tar
|
|
||||||
|
|
||||||
nc-broadcast:
|
|
||||||
uses: ./.github/workflows/reusable-workflow.yaml
|
|
||||||
with:
|
|
||||||
build_command: make cargo-deps/aarch64-unknown-linux-gnu/release/nc-broadcast
|
|
||||||
artifact_name: nc-broadcast.tar
|
|
||||||
artifact_path: cargo-deps/aarch64-unknown-linux-gnu/release/nc-broadcast
|
|
||||||
|
|
||||||
backend:
|
|
||||||
uses: ./.github/workflows/backend.yaml
|
|
||||||
|
|
||||||
frontend:
|
|
||||||
uses: ./.github/workflows/frontend.yaml
|
|
||||||
|
|
||||||
image:
|
|
||||||
name: Build image
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
timeout-minutes: 60
|
|
||||||
needs: [compat,utils,binfmt,nc-broadcast,backend,frontend]
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
submodules: recursive
|
|
||||||
|
|
||||||
- name: Download compat.tar artifact
|
|
||||||
uses: actions/download-artifact@v3
|
|
||||||
with:
|
|
||||||
name: compat.tar
|
|
||||||
path: system-images/compat
|
|
||||||
|
|
||||||
- name: Download utils.tar artifact
|
|
||||||
uses: actions/download-artifact@v3
|
|
||||||
with:
|
|
||||||
name: utils.tar
|
|
||||||
path: system-images/utils
|
|
||||||
|
|
||||||
- name: Download binfmt.tar artifact
|
|
||||||
uses: actions/download-artifact@v3
|
|
||||||
with:
|
|
||||||
name: binfmt.tar
|
|
||||||
path: system-images/binfmt
|
|
||||||
|
|
||||||
- name: Download nc-broadcast.tar artifact
|
|
||||||
uses: actions/download-artifact@v3
|
|
||||||
with:
|
|
||||||
name: nc-broadcast.tar
|
|
||||||
path: cargo-deps/aarch64-unknown-linux-gnu/release
|
|
||||||
|
|
||||||
- name: Download js_snapshot artifact
|
|
||||||
uses: actions/download-artifact@v3
|
|
||||||
with:
|
|
||||||
name: js_snapshot
|
|
||||||
path: libs/js_engine/src/artifacts/
|
|
||||||
|
|
||||||
- name: Download arm_js_snapshot artifact
|
|
||||||
uses: actions/download-artifact@v3
|
|
||||||
with:
|
|
||||||
name: arm_js_snapshot
|
|
||||||
path: libs/js_engine/src/artifacts/
|
|
||||||
|
|
||||||
- name: Download backend artifact
|
|
||||||
uses: actions/download-artifact@v3
|
|
||||||
with:
|
|
||||||
name: backend-aarch64
|
|
||||||
|
|
||||||
- name: 'Extract backend'
|
|
||||||
run:
|
|
||||||
tar -mxvf backend-aarch64.tar
|
|
||||||
|
|
||||||
- name: Download frontend artifact
|
|
||||||
uses: actions/download-artifact@v3
|
|
||||||
with:
|
|
||||||
name: frontend
|
|
||||||
|
|
||||||
- name: Skip frontend build
|
|
||||||
run: |
|
|
||||||
mkdir frontend/node_modules
|
|
||||||
mkdir frontend/dist
|
|
||||||
mkdir patch-db/client/node_modules
|
|
||||||
mkdir patch-db/client/dist
|
|
||||||
|
|
||||||
- name: 'Extract frontend'
|
|
||||||
run: |
|
|
||||||
tar -mxvf frontend.tar frontend/config.json
|
|
||||||
tar -mxvf frontend.tar frontend/dist
|
|
||||||
|
|
||||||
- name: Cache raspiOS
|
|
||||||
id: cache-raspios
|
|
||||||
uses: actions/cache@v3
|
|
||||||
with:
|
|
||||||
path: raspios.img
|
|
||||||
key: cache-raspios
|
|
||||||
|
|
||||||
- name: Build image
|
|
||||||
run: "make V=1 NO_KEY=1 eos.img --debug"
|
|
||||||
|
|
||||||
- name: Compress image
|
|
||||||
run: "make gzip"
|
|
||||||
|
|
||||||
- uses: actions/upload-artifact@v3
|
|
||||||
with:
|
|
||||||
name: image
|
|
||||||
path: eos.tar.gz
|
|
||||||
34
.github/workflows/reusable-workflow.yaml
vendored
@@ -1,34 +0,0 @@
|
|||||||
name: Reusable Workflow
|
|
||||||
|
|
||||||
on:
|
|
||||||
workflow_call:
|
|
||||||
inputs:
|
|
||||||
build_command:
|
|
||||||
required: true
|
|
||||||
type: string
|
|
||||||
artifact_name:
|
|
||||||
required: true
|
|
||||||
type: string
|
|
||||||
artifact_path:
|
|
||||||
required: true
|
|
||||||
type: string
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
generic_build_job:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
timeout-minutes: 60
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
submodules: recursive
|
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v1
|
|
||||||
|
|
||||||
- name: Build image
|
|
||||||
run: ${{ inputs.build_command }}
|
|
||||||
|
|
||||||
- uses: actions/upload-artifact@v3
|
|
||||||
with:
|
|
||||||
name: ${{ inputs.artifact_name }}
|
|
||||||
path: ${{ inputs.artifact_path }}
|
|
||||||
88
.github/workflows/start-cli.yaml
vendored
Normal file
@@ -0,0 +1,88 @@
|
|||||||
|
name: start-cli
|
||||||
|
|
||||||
|
on:
|
||||||
|
workflow_call:
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
environment:
|
||||||
|
type: choice
|
||||||
|
description: Environment
|
||||||
|
options:
|
||||||
|
- NONE
|
||||||
|
- dev
|
||||||
|
- unstable
|
||||||
|
- dev-unstable
|
||||||
|
runner:
|
||||||
|
type: choice
|
||||||
|
description: Runner
|
||||||
|
options:
|
||||||
|
- standard
|
||||||
|
- fast
|
||||||
|
arch:
|
||||||
|
type: choice
|
||||||
|
description: Architecture
|
||||||
|
options:
|
||||||
|
- ALL
|
||||||
|
- x86_64
|
||||||
|
- x86_64-apple
|
||||||
|
- aarch64
|
||||||
|
- aarch64-apple
|
||||||
|
- riscv64
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
- next/*
|
||||||
|
pull_request:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
- next/*
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
|
env:
|
||||||
|
NODEJS_VERSION: "24.11.0"
|
||||||
|
ENVIRONMENT: '${{ fromJson(format(''["{0}", ""]'', github.event.inputs.environment || ''dev''))[github.event.inputs.environment == ''NONE''] }}'
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
compile:
|
||||||
|
name: Build Debian Package
|
||||||
|
if: github.event.pull_request.draft != true
|
||||||
|
strategy:
|
||||||
|
fail-fast: true
|
||||||
|
matrix:
|
||||||
|
triple: >-
|
||||||
|
${{
|
||||||
|
fromJson('{
|
||||||
|
"x86_64": ["x86_64-unknown-linux-musl"],
|
||||||
|
"x86_64-apple": ["x86_64-apple-darwin"],
|
||||||
|
"aarch64": ["aarch64-unknown-linux-musl"],
|
||||||
|
"x86_64-apple": ["aarch64-apple-darwin"],
|
||||||
|
"riscv64": ["riscv64gc-unknown-linux-musl"],
|
||||||
|
"ALL": ["x86_64-unknown-linux-musl", "x86_64-apple-darwin", "aarch64-unknown-linux-musl", "aarch64-apple-darwin", "riscv64gc-unknown-linux-musl"]
|
||||||
|
}')[github.event.inputs.platform || 'ALL']
|
||||||
|
}}
|
||||||
|
runs-on: ${{ fromJson('["ubuntu-latest", "buildjet-32vcpu-ubuntu-2204"]')[github.event.inputs.runner == 'fast'] }}
|
||||||
|
steps:
|
||||||
|
- name: Mount tmpfs
|
||||||
|
if: ${{ github.event.inputs.runner == 'fast' }}
|
||||||
|
run: sudo mount -t tmpfs tmpfs .
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
submodules: recursive
|
||||||
|
- uses: ./.github/actions/setup-build
|
||||||
|
with:
|
||||||
|
nodejs-version: ${{ env.NODEJS_VERSION }}
|
||||||
|
|
||||||
|
- name: Make
|
||||||
|
run: TARGET=${{ matrix.triple }} make cli
|
||||||
|
env:
|
||||||
|
PLATFORM: ${{ matrix.arch }}
|
||||||
|
SCCACHE_GHA_ENABLED: on
|
||||||
|
SCCACHE_GHA_VERSION: 0
|
||||||
|
|
||||||
|
- uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: start-cli_${{ matrix.triple }}
|
||||||
|
path: core/target/${{ matrix.triple }}/release/start-cli
|
||||||
173
.github/workflows/start-registry.yaml
vendored
Normal file
@@ -0,0 +1,173 @@
|
|||||||
|
name: start-registry
|
||||||
|
|
||||||
|
on:
|
||||||
|
workflow_call:
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
environment:
|
||||||
|
type: choice
|
||||||
|
description: Environment
|
||||||
|
options:
|
||||||
|
- NONE
|
||||||
|
- dev
|
||||||
|
- unstable
|
||||||
|
- dev-unstable
|
||||||
|
runner:
|
||||||
|
type: choice
|
||||||
|
description: Runner
|
||||||
|
options:
|
||||||
|
- standard
|
||||||
|
- fast
|
||||||
|
arch:
|
||||||
|
type: choice
|
||||||
|
description: Architecture
|
||||||
|
options:
|
||||||
|
- ALL
|
||||||
|
- x86_64
|
||||||
|
- aarch64
|
||||||
|
- riscv64
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
- next/*
|
||||||
|
pull_request:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
- next/*
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
|
env:
|
||||||
|
NODEJS_VERSION: "24.11.0"
|
||||||
|
ENVIRONMENT: '${{ fromJson(format(''["{0}", ""]'', github.event.inputs.environment || ''dev''))[github.event.inputs.environment == ''NONE''] }}'
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
compile:
|
||||||
|
name: Build Debian Package
|
||||||
|
if: github.event.pull_request.draft != true
|
||||||
|
strategy:
|
||||||
|
fail-fast: true
|
||||||
|
matrix:
|
||||||
|
arch: >-
|
||||||
|
${{
|
||||||
|
fromJson('{
|
||||||
|
"x86_64": ["x86_64"],
|
||||||
|
"aarch64": ["aarch64"],
|
||||||
|
"riscv64": ["riscv64"],
|
||||||
|
"ALL": ["x86_64", "aarch64", "riscv64"]
|
||||||
|
}')[github.event.inputs.platform || 'ALL']
|
||||||
|
}}
|
||||||
|
runs-on: ${{ fromJson('["ubuntu-latest", "buildjet-32vcpu-ubuntu-2204"]')[github.event.inputs.runner == 'fast'] }}
|
||||||
|
steps:
|
||||||
|
- name: Mount tmpfs
|
||||||
|
if: ${{ github.event.inputs.runner == 'fast' }}
|
||||||
|
run: sudo mount -t tmpfs tmpfs .
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
submodules: recursive
|
||||||
|
- uses: ./.github/actions/setup-build
|
||||||
|
with:
|
||||||
|
nodejs-version: ${{ env.NODEJS_VERSION }}
|
||||||
|
|
||||||
|
- name: Make
|
||||||
|
run: make registry-deb
|
||||||
|
env:
|
||||||
|
PLATFORM: ${{ matrix.arch }}
|
||||||
|
SCCACHE_GHA_ENABLED: on
|
||||||
|
SCCACHE_GHA_VERSION: 0
|
||||||
|
|
||||||
|
- uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: start-registry_${{ matrix.arch }}.deb
|
||||||
|
path: results/start-registry-*_${{ matrix.arch }}.deb
|
||||||
|
|
||||||
|
create-image:
|
||||||
|
name: Create Docker Image
|
||||||
|
needs: [compile]
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
packages: write
|
||||||
|
runs-on: ${{ fromJson('["ubuntu-latest", "buildjet-32vcpu-ubuntu-2204"]')[github.event.inputs.runner == 'fast'] }}
|
||||||
|
steps:
|
||||||
|
- name: Cleaning up unnecessary files
|
||||||
|
run: |
|
||||||
|
sudo apt-get remove --purge -y google-chrome-stable firefox mono-devel
|
||||||
|
sudo apt-get autoremove -y
|
||||||
|
sudo apt-get clean
|
||||||
|
|
||||||
|
- run: |
|
||||||
|
sudo mount -t tmpfs tmpfs .
|
||||||
|
if: ${{ github.event.inputs.runner == 'fast' }}
|
||||||
|
|
||||||
|
- name: Set up docker QEMU
|
||||||
|
uses: docker/setup-qemu-action@v3
|
||||||
|
|
||||||
|
- name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v3
|
||||||
|
|
||||||
|
- name: "Login to GitHub Container Registry"
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{github.actor}}
|
||||||
|
password: ${{secrets.GITHUB_TOKEN}}
|
||||||
|
|
||||||
|
- name: Docker meta
|
||||||
|
id: meta
|
||||||
|
uses: docker/metadata-action@v5
|
||||||
|
with:
|
||||||
|
images: ghcr.io/Start9Labs/startos-registry
|
||||||
|
tags: |
|
||||||
|
type=raw,value=${{ github.ref_name }}
|
||||||
|
|
||||||
|
- name: Download debian package
|
||||||
|
uses: actions/download-artifact@v4
|
||||||
|
with:
|
||||||
|
pattern: start-registry_*.deb
|
||||||
|
|
||||||
|
- name: Map matrix.arch to docker platform
|
||||||
|
run: |
|
||||||
|
platforms=""
|
||||||
|
for deb in *.deb; do
|
||||||
|
filename=$(basename "$deb" .deb)
|
||||||
|
arch="${filename#*_}"
|
||||||
|
case "$arch" in
|
||||||
|
x86_64)
|
||||||
|
platform="linux/amd64"
|
||||||
|
;;
|
||||||
|
aarch64)
|
||||||
|
platform="linux/arm64"
|
||||||
|
;;
|
||||||
|
riscv64)
|
||||||
|
platform="linux/riscv64"
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
echo "Unknown architecture: $arch" >&2
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
if [ -z "$platforms" ]; then
|
||||||
|
platforms="$platform"
|
||||||
|
else
|
||||||
|
platforms="$platforms,$platform"
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
echo "DOCKER_PLATFORM=$platforms" >> "$GITHUB_ENV"
|
||||||
|
|
||||||
|
- run: |
|
||||||
|
cat | docker buildx build --platform "$DOCKER_PLATFORM" --push -t ${{ steps.meta.outputs.tags }} -f - . << 'EOF'
|
||||||
|
FROM debian:trixie
|
||||||
|
|
||||||
|
ADD *.deb .
|
||||||
|
|
||||||
|
RUN apt-get install -y ./*_$(uname -m).deb && rm *.deb
|
||||||
|
|
||||||
|
VOLUME /var/lib/startos
|
||||||
|
|
||||||
|
ENV RUST_LOG=startos=debug
|
||||||
|
|
||||||
|
ENTRYPOINT ["start-registryd"]
|
||||||
|
|
||||||
|
EOF
|
||||||
84
.github/workflows/start-tunnel.yaml
vendored
Normal file
@@ -0,0 +1,84 @@
|
|||||||
|
name: start-tunnel
|
||||||
|
|
||||||
|
on:
|
||||||
|
workflow_call:
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
environment:
|
||||||
|
type: choice
|
||||||
|
description: Environment
|
||||||
|
options:
|
||||||
|
- NONE
|
||||||
|
- dev
|
||||||
|
- unstable
|
||||||
|
- dev-unstable
|
||||||
|
runner:
|
||||||
|
type: choice
|
||||||
|
description: Runner
|
||||||
|
options:
|
||||||
|
- standard
|
||||||
|
- fast
|
||||||
|
arch:
|
||||||
|
type: choice
|
||||||
|
description: Architecture
|
||||||
|
options:
|
||||||
|
- ALL
|
||||||
|
- x86_64
|
||||||
|
- aarch64
|
||||||
|
- riscv64
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
- next/*
|
||||||
|
pull_request:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
- next/*
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
|
env:
|
||||||
|
NODEJS_VERSION: "24.11.0"
|
||||||
|
ENVIRONMENT: '${{ fromJson(format(''["{0}", ""]'', github.event.inputs.environment || ''dev''))[github.event.inputs.environment == ''NONE''] }}'
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
compile:
|
||||||
|
name: Build Debian Package
|
||||||
|
if: github.event.pull_request.draft != true
|
||||||
|
strategy:
|
||||||
|
fail-fast: true
|
||||||
|
matrix:
|
||||||
|
arch: >-
|
||||||
|
${{
|
||||||
|
fromJson('{
|
||||||
|
"x86_64": ["x86_64"],
|
||||||
|
"aarch64": ["aarch64"],
|
||||||
|
"riscv64": ["riscv64"],
|
||||||
|
"ALL": ["x86_64", "aarch64", "riscv64"]
|
||||||
|
}')[github.event.inputs.platform || 'ALL']
|
||||||
|
}}
|
||||||
|
runs-on: ${{ fromJson('["ubuntu-latest", "buildjet-32vcpu-ubuntu-2204"]')[github.event.inputs.runner == 'fast'] }}
|
||||||
|
steps:
|
||||||
|
- name: Mount tmpfs
|
||||||
|
if: ${{ github.event.inputs.runner == 'fast' }}
|
||||||
|
run: sudo mount -t tmpfs tmpfs .
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
submodules: recursive
|
||||||
|
- uses: ./.github/actions/setup-build
|
||||||
|
with:
|
||||||
|
nodejs-version: ${{ env.NODEJS_VERSION }}
|
||||||
|
|
||||||
|
- name: Make
|
||||||
|
run: make tunnel-deb
|
||||||
|
env:
|
||||||
|
PLATFORM: ${{ matrix.arch }}
|
||||||
|
SCCACHE_GHA_ENABLED: on
|
||||||
|
SCCACHE_GHA_VERSION: 0
|
||||||
|
|
||||||
|
- uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: start-tunnel_${{ matrix.arch }}.deb
|
||||||
|
path: results/start-tunnel-*_${{ matrix.arch }}.deb
|
||||||
256
.github/workflows/startos-iso.yaml
vendored
Normal file
@@ -0,0 +1,256 @@
|
|||||||
|
name: Debian-based ISO and SquashFS
|
||||||
|
|
||||||
|
on:
|
||||||
|
workflow_call:
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
environment:
|
||||||
|
type: choice
|
||||||
|
description: Environment
|
||||||
|
options:
|
||||||
|
- NONE
|
||||||
|
- dev
|
||||||
|
- unstable
|
||||||
|
- dev-unstable
|
||||||
|
runner:
|
||||||
|
type: choice
|
||||||
|
description: Runner
|
||||||
|
options:
|
||||||
|
- standard
|
||||||
|
- fast
|
||||||
|
platform:
|
||||||
|
type: choice
|
||||||
|
description: Platform
|
||||||
|
options:
|
||||||
|
- ALL
|
||||||
|
- x86_64
|
||||||
|
- x86_64-nonfree
|
||||||
|
- aarch64
|
||||||
|
- aarch64-nonfree
|
||||||
|
# - raspberrypi
|
||||||
|
- riscv64
|
||||||
|
deploy:
|
||||||
|
type: choice
|
||||||
|
description: Deploy
|
||||||
|
options:
|
||||||
|
- NONE
|
||||||
|
- alpha
|
||||||
|
- beta
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
- next/*
|
||||||
|
pull_request:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
- next/*
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
|
env:
|
||||||
|
NODEJS_VERSION: "24.11.0"
|
||||||
|
ENVIRONMENT: '${{ fromJson(format(''["{0}", ""]'', github.event.inputs.environment || ''dev''))[github.event.inputs.environment == ''NONE''] }}'
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
compile:
|
||||||
|
name: Compile Base Binaries
|
||||||
|
if: github.event.pull_request.draft != true
|
||||||
|
strategy:
|
||||||
|
fail-fast: true
|
||||||
|
matrix:
|
||||||
|
arch: >-
|
||||||
|
${{
|
||||||
|
fromJson('{
|
||||||
|
"x86_64": ["x86_64"],
|
||||||
|
"x86_64-nonfree": ["x86_64"],
|
||||||
|
"aarch64": ["aarch64"],
|
||||||
|
"aarch64-nonfree": ["aarch64"],
|
||||||
|
"raspberrypi": ["aarch64"],
|
||||||
|
"riscv64": ["riscv64"],
|
||||||
|
"ALL": ["x86_64", "aarch64", "riscv64"]
|
||||||
|
}')[github.event.inputs.platform || 'ALL']
|
||||||
|
}}
|
||||||
|
runs-on: >-
|
||||||
|
${{
|
||||||
|
fromJson(
|
||||||
|
format(
|
||||||
|
'["{0}", "{1}"]',
|
||||||
|
fromJson('{
|
||||||
|
"x86_64": "ubuntu-latest",
|
||||||
|
"aarch64": "ubuntu-24.04-arm",
|
||||||
|
"riscv64": "ubuntu-latest"
|
||||||
|
}')[matrix.arch],
|
||||||
|
fromJson('{
|
||||||
|
"x86_64": "buildjet-32vcpu-ubuntu-2204",
|
||||||
|
"aarch64": "buildjet-32vcpu-ubuntu-2204-arm",
|
||||||
|
"riscv64": "buildjet-32vcpu-ubuntu-2204"
|
||||||
|
}')[matrix.arch]
|
||||||
|
)
|
||||||
|
)[github.event.inputs.runner == 'fast']
|
||||||
|
}}
|
||||||
|
steps:
|
||||||
|
- name: Mount tmpfs
|
||||||
|
if: ${{ github.event.inputs.runner == 'fast' }}
|
||||||
|
run: sudo mount -t tmpfs tmpfs .
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
submodules: recursive
|
||||||
|
- uses: ./.github/actions/setup-build
|
||||||
|
with:
|
||||||
|
nodejs-version: ${{ env.NODEJS_VERSION }}
|
||||||
|
setup-python: "true"
|
||||||
|
|
||||||
|
- name: Make
|
||||||
|
run: make ARCH=${{ matrix.arch }} compiled-${{ matrix.arch }}.tar
|
||||||
|
env:
|
||||||
|
SCCACHE_GHA_ENABLED: on
|
||||||
|
SCCACHE_GHA_VERSION: 0
|
||||||
|
|
||||||
|
- uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: compiled-${{ matrix.arch }}.tar
|
||||||
|
path: compiled-${{ matrix.arch }}.tar
|
||||||
|
image:
|
||||||
|
name: Build Image
|
||||||
|
needs: [compile]
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
# TODO: re-add "raspberrypi" to the platform list below
|
||||||
|
platform: >-
|
||||||
|
${{
|
||||||
|
fromJson(
|
||||||
|
format(
|
||||||
|
'[
|
||||||
|
["{0}"],
|
||||||
|
["x86_64", "x86_64-nonfree", "aarch64", "aarch64-nonfree", "riscv64"]
|
||||||
|
]',
|
||||||
|
github.event.inputs.platform || 'ALL'
|
||||||
|
)
|
||||||
|
)[(github.event.inputs.platform || 'ALL') == 'ALL']
|
||||||
|
}}
|
||||||
|
runs-on: >-
|
||||||
|
${{
|
||||||
|
fromJson(
|
||||||
|
format(
|
||||||
|
'["{0}", "{1}"]',
|
||||||
|
fromJson('{
|
||||||
|
"x86_64": "ubuntu-latest",
|
||||||
|
"x86_64-nonfree": "ubuntu-latest",
|
||||||
|
"aarch64": "ubuntu-24.04-arm",
|
||||||
|
"aarch64-nonfree": "ubuntu-24.04-arm",
|
||||||
|
"raspberrypi": "ubuntu-24.04-arm",
|
||||||
|
"riscv64": "ubuntu-24.04-arm",
|
||||||
|
}')[matrix.platform],
|
||||||
|
fromJson('{
|
||||||
|
"x86_64": "buildjet-8vcpu-ubuntu-2204",
|
||||||
|
"x86_64-nonfree": "buildjet-8vcpu-ubuntu-2204",
|
||||||
|
"aarch64": "buildjet-8vcpu-ubuntu-2204-arm",
|
||||||
|
"aarch64-nonfree": "buildjet-8vcpu-ubuntu-2204-arm",
|
||||||
|
"raspberrypi": "buildjet-8vcpu-ubuntu-2204-arm",
|
||||||
|
"riscv64": "buildjet-8vcpu-ubuntu-2204",
|
||||||
|
}')[matrix.platform]
|
||||||
|
)
|
||||||
|
)[github.event.inputs.runner == 'fast']
|
||||||
|
}}
|
||||||
|
env:
|
||||||
|
ARCH: >-
|
||||||
|
${{
|
||||||
|
fromJson('{
|
||||||
|
"x86_64": "x86_64",
|
||||||
|
"x86_64-nonfree": "x86_64",
|
||||||
|
"aarch64": "aarch64",
|
||||||
|
"aarch64-nonfree": "aarch64",
|
||||||
|
"raspberrypi": "aarch64",
|
||||||
|
"riscv64": "riscv64",
|
||||||
|
}')[matrix.platform]
|
||||||
|
}}
|
||||||
|
steps:
|
||||||
|
- name: Free space
|
||||||
|
run: |
|
||||||
|
sudo apt-get remove --purge -y azure-cli || true
|
||||||
|
sudo apt-get remove --purge -y firefox || true
|
||||||
|
sudo apt-get remove --purge -y ghc-* || true
|
||||||
|
sudo apt-get remove --purge -y google-cloud-sdk || true
|
||||||
|
sudo apt-get remove --purge -y google-chrome-stable || true
|
||||||
|
sudo apt-get remove --purge -y powershell || true
|
||||||
|
sudo apt-get remove --purge -y php* || true
|
||||||
|
sudo apt-get remove --purge -y ruby* || true
|
||||||
|
sudo apt-get remove --purge -y mono-* || true
|
||||||
|
sudo apt-get autoremove -y
|
||||||
|
sudo apt-get clean
|
||||||
|
sudo rm -rf /usr/lib/jvm # All JDKs
|
||||||
|
sudo rm -rf /usr/local/.ghcup # Haskell toolchain
|
||||||
|
sudo rm -rf /usr/local/lib/android # Android SDK/NDK, emulator
|
||||||
|
sudo rm -rf /usr/share/dotnet # .NET SDKs
|
||||||
|
sudo rm -rf /usr/share/swift # Swift toolchain (if present)
|
||||||
|
sudo rm -rf "$AGENT_TOOLSDIRECTORY" # Pre-cached tool cache (Go, Node, etc.)
|
||||||
|
if: ${{ github.event.inputs.runner != 'fast' }}
|
||||||
|
|
||||||
|
# BuildJet runners lack /opt/hostedtoolcache, which setup-qemu expects
|
||||||
|
- name: Ensure hostedtoolcache exists
|
||||||
|
run: sudo mkdir -p /opt/hostedtoolcache && sudo chown $USER:$USER /opt/hostedtoolcache
|
||||||
|
|
||||||
|
- name: Set up docker QEMU
|
||||||
|
uses: docker/setup-qemu-action@v3
|
||||||
|
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
submodules: recursive
|
||||||
|
|
||||||
|
- name: Download compiled artifacts
|
||||||
|
uses: actions/download-artifact@v4
|
||||||
|
with:
|
||||||
|
name: compiled-${{ env.ARCH }}.tar
|
||||||
|
|
||||||
|
- name: Extract compiled artifacts
|
||||||
|
run: tar -xvf compiled-${{ env.ARCH }}.tar
|
||||||
|
|
||||||
|
- name: Prevent rebuild of compiled artifacts
|
||||||
|
run: |
|
||||||
|
mkdir -p web/node_modules
|
||||||
|
mkdir -p web/dist/raw
|
||||||
|
mkdir -p core/bindings
|
||||||
|
mkdir -p sdk/base/lib/osBindings
|
||||||
|
mkdir -p container-runtime/node_modules
|
||||||
|
mkdir -p container-runtime/dist
|
||||||
|
mkdir -p container-runtime/dist/node_modules
|
||||||
|
mkdir -p sdk/dist
|
||||||
|
mkdir -p sdk/baseDist
|
||||||
|
mkdir -p patch-db/client/node_modules
|
||||||
|
mkdir -p patch-db/client/dist
|
||||||
|
mkdir -p web/.angular
|
||||||
|
mkdir -p web/dist/raw/ui
|
||||||
|
mkdir -p web/dist/raw/setup-wizard
|
||||||
|
mkdir -p web/dist/static/ui
|
||||||
|
mkdir -p web/dist/static/setup-wizard
|
||||||
|
PLATFORM=${{ matrix.platform }} make -t compiled-${{ env.ARCH }}.tar
|
||||||
|
|
||||||
|
- run: git status
|
||||||
|
|
||||||
|
- name: Run iso build
|
||||||
|
run: PLATFORM=${{ matrix.platform }} make iso
|
||||||
|
if: ${{ matrix.platform != 'raspberrypi' }}
|
||||||
|
|
||||||
|
- name: Run img build
|
||||||
|
run: PLATFORM=${{ matrix.platform }} make img
|
||||||
|
if: ${{ matrix.platform == 'raspberrypi' }}
|
||||||
|
|
||||||
|
- uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: ${{ matrix.platform }}.squashfs
|
||||||
|
path: results/*.squashfs
|
||||||
|
|
||||||
|
- uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: ${{ matrix.platform }}.iso
|
||||||
|
path: results/*.iso
|
||||||
|
if: ${{ matrix.platform != 'raspberrypi' }}
|
||||||
|
|
||||||
|
- uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: ${{ matrix.platform }}.img
|
||||||
|
path: results/*.img
|
||||||
|
if: ${{ matrix.platform == 'raspberrypi' }}
|
||||||
38
.github/workflows/test.yaml
vendored
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
name: Automated Tests
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
- next/*
|
||||||
|
pull_request:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
- next/*
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
|
env:
|
||||||
|
NODEJS_VERSION: "24.11.0"
|
||||||
|
ENVIRONMENT: dev-unstable
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
test:
|
||||||
|
name: Run Automated Tests
|
||||||
|
if: github.event.pull_request.draft != true
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
submodules: recursive
|
||||||
|
- uses: ./.github/actions/setup-build
|
||||||
|
with:
|
||||||
|
nodejs-version: ${{ env.NODEJS_VERSION }}
|
||||||
|
free-space: "false"
|
||||||
|
setup-docker: "false"
|
||||||
|
setup-sccache: "false"
|
||||||
|
|
||||||
|
- name: Build And Run Tests
|
||||||
|
run: make test
|
||||||
26
.gitignore
vendored
@@ -1,18 +1,24 @@
|
|||||||
.DS_Store
|
.DS_Store
|
||||||
.idea
|
.idea
|
||||||
/*.img
|
*.img
|
||||||
/*.img.gz
|
*.img.gz
|
||||||
/*.img.xz
|
*.img.xz
|
||||||
/*-raspios-bullseye-arm64-lite.img
|
*.zip
|
||||||
/*-raspios-bullseye-arm64-lite.zip
|
|
||||||
/product_key.txt
|
/product_key.txt
|
||||||
/*_product_key.txt
|
/*_product_key.txt
|
||||||
.vscode/settings.json
|
.vscode/settings.json
|
||||||
deploy_web.sh
|
deploy_web.sh
|
||||||
deploy_web.sh
|
|
||||||
secrets.db
|
secrets.db
|
||||||
.vscode/
|
.vscode/
|
||||||
/cargo-deps/**/*
|
/build/env/*.txt
|
||||||
/ENVIRONMENT.txt
|
*.deb
|
||||||
/GIT_HASH.txt
|
/target
|
||||||
/eos.tar.gz
|
*.squashfs
|
||||||
|
/results
|
||||||
|
/dpkg-workdir
|
||||||
|
/compiled.tar
|
||||||
|
/compiled-*.tar
|
||||||
|
/build/lib/firmware
|
||||||
|
tmp
|
||||||
|
web/.i18n-checked
|
||||||
|
agents/USER.md
|
||||||
|
|||||||
146
CLAUDE.md
Normal file
@@ -0,0 +1,146 @@
|
|||||||
|
# CLAUDE.md
|
||||||
|
|
||||||
|
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
||||||
|
|
||||||
|
## Project Overview
|
||||||
|
|
||||||
|
StartOS is an open-source Linux distribution for running personal servers. It manages discovery, installation, network configuration, backups, and health monitoring of self-hosted services.
|
||||||
|
|
||||||
|
**Tech Stack:**
|
||||||
|
- Backend: Rust (async/Tokio, Axum web framework)
|
||||||
|
- Frontend: Angular 20 + TypeScript + TaigaUI
|
||||||
|
- Container runtime: Node.js/TypeScript with LXC
|
||||||
|
- Database/State: Patch-DB (git submodule) - storage layer with reactive frontend sync
|
||||||
|
- API: JSON-RPC via rpc-toolkit (see `agents/rpc-toolkit.md`)
|
||||||
|
- Auth: Password + session cookie, public/private key signatures, local authcookie (see `core/src/middleware/auth/`)
|
||||||
|
|
||||||
|
## Build & Development
|
||||||
|
|
||||||
|
See [CONTRIBUTING.md](CONTRIBUTING.md) for:
|
||||||
|
- Environment setup and requirements
|
||||||
|
- Build commands and make targets
|
||||||
|
- Testing and formatting commands
|
||||||
|
- Environment variables
|
||||||
|
|
||||||
|
**Quick reference:**
|
||||||
|
```bash
|
||||||
|
. ./devmode.sh # Enable dev mode
|
||||||
|
make update-startbox REMOTE=start9@<ip> # Fastest iteration (binary + UI)
|
||||||
|
make test-core # Run Rust tests
|
||||||
|
```
|
||||||
|
|
||||||
|
## Architecture
|
||||||
|
|
||||||
|
### Core (`/core`)
|
||||||
|
The Rust backend daemon. Main binaries:
|
||||||
|
- `startbox` - Main daemon (runs as `startd`)
|
||||||
|
- `start-cli` - CLI interface
|
||||||
|
- `start-container` - Runs inside LXC containers; communicates with host and manages subcontainers
|
||||||
|
- `registrybox` - Registry daemon
|
||||||
|
- `tunnelbox` - VPN/tunnel daemon
|
||||||
|
|
||||||
|
**Key modules:**
|
||||||
|
- `src/context/` - Context types (RpcContext, CliContext, InitContext, DiagnosticContext)
|
||||||
|
- `src/service/` - Service lifecycle management with actor pattern (`service_actor.rs`)
|
||||||
|
- `src/db/model/` - Patch-DB models (`public.rs` synced to frontend, `private.rs` backend-only)
|
||||||
|
- `src/net/` - Networking (DNS, ACME, WiFi, Tor via Arti, WireGuard)
|
||||||
|
- `src/s9pk/` - S9PK package format (merkle archive)
|
||||||
|
- `src/registry/` - Package registry management
|
||||||
|
|
||||||
|
**RPC Pattern:** See `agents/rpc-toolkit.md`
|
||||||
|
|
||||||
|
### Web (`/web`)
|
||||||
|
Angular projects sharing common code:
|
||||||
|
- `projects/ui/` - Main admin interface
|
||||||
|
- `projects/setup-wizard/` - Initial setup
|
||||||
|
- `projects/start-tunnel/` - VPN management UI
|
||||||
|
- `projects/shared/` - Common library (API clients, components)
|
||||||
|
- `projects/marketplace/` - Service discovery
|
||||||
|
|
||||||
|
**Development:**
|
||||||
|
```bash
|
||||||
|
cd web
|
||||||
|
npm ci
|
||||||
|
npm run start:ui # Dev server with mocks
|
||||||
|
npm run build:ui # Production build
|
||||||
|
npm run check # Type check all projects
|
||||||
|
```
|
||||||
|
|
||||||
|
### Container Runtime (`/container-runtime`)
|
||||||
|
Node.js runtime that manages service containers via RPC. See `RPCSpec.md` for protocol.
|
||||||
|
|
||||||
|
**Container Architecture:**
|
||||||
|
```
|
||||||
|
LXC Container (uniform base for all services)
|
||||||
|
└── systemd
|
||||||
|
└── container-runtime.service
|
||||||
|
└── Loads /usr/lib/startos/package/index.js (from s9pk javascript.squashfs)
|
||||||
|
└── Package JS launches subcontainers (from images in s9pk)
|
||||||
|
```
|
||||||
|
|
||||||
|
The container runtime communicates with the host via JSON-RPC over Unix socket. Package JavaScript must export functions conforming to the `ABI` type defined in `sdk/base/lib/types.ts`.
|
||||||
|
|
||||||
|
**`/media/startos/` directory (mounted by host into container):**
|
||||||
|
|
||||||
|
| Path | Description |
|
||||||
|
|------|-------------|
|
||||||
|
| `volumes/<name>/` | Package data volumes (id-mapped, persistent) |
|
||||||
|
| `assets/` | Read-only assets from s9pk `assets.squashfs` |
|
||||||
|
| `images/<name>/` | Container images (squashfs, used for subcontainers) |
|
||||||
|
| `images/<name>.env` | Environment variables for image |
|
||||||
|
| `images/<name>.json` | Image metadata |
|
||||||
|
| `backup/` | Backup mount point (mounted during backup operations) |
|
||||||
|
| `rpc/service.sock` | RPC socket (container runtime listens here) |
|
||||||
|
| `rpc/host.sock` | Host RPC socket (for effects callbacks to host) |
|
||||||
|
|
||||||
|
**S9PK Structure:** See `agents/s9pk-structure.md`
|
||||||
|
|
||||||
|
### SDK (`/sdk`)
|
||||||
|
TypeScript SDK for packaging services (`@start9labs/start-sdk`).
|
||||||
|
|
||||||
|
- `base/` - Core types, ABI definitions, effects interface (`@start9labs/start-sdk-base`)
|
||||||
|
- `package/` - Full SDK for package developers, re-exports base
|
||||||
|
|
||||||
|
### Patch-DB (`/patch-db`)
|
||||||
|
Git submodule providing diff-based state synchronization. Changes to `db/model/public.rs` automatically sync to the frontend.
|
||||||
|
|
||||||
|
**Key patterns:**
|
||||||
|
- `db.peek().await` - Get a read-only snapshot of the database state
|
||||||
|
- `db.mutate(|db| { ... }).await` - Apply mutations atomically, returns `MutateResult`
|
||||||
|
- `#[derive(HasModel)]` - Derive macro for types stored in the database, generates typed accessors
|
||||||
|
|
||||||
|
**Generated accessor types** (from `HasModel` derive):
|
||||||
|
- `as_field()` - Immutable reference: `&Model<T>`
|
||||||
|
- `as_field_mut()` - Mutable reference: `&mut Model<T>`
|
||||||
|
- `into_field()` - Owned value: `Model<T>`
|
||||||
|
|
||||||
|
**`Model<T>` APIs** (from `db/prelude.rs`):
|
||||||
|
- `.de()` - Deserialize to `T`
|
||||||
|
- `.ser(&value)` - Serialize from `T`
|
||||||
|
- `.mutate(|v| ...)` - Deserialize, mutate, reserialize
|
||||||
|
- For maps: `.keys()`, `.as_idx(&key)`, `.as_idx_mut(&key)`, `.insert()`, `.remove()`, `.contains_key()`
|
||||||
|
|
||||||
|
## Supplementary Documentation
|
||||||
|
|
||||||
|
The `agents/` directory contains detailed documentation for AI assistants:
|
||||||
|
|
||||||
|
- `TODO.md` - Pending tasks for AI agents (check this first, remove items when completed)
|
||||||
|
- `USER.md` - Current user identifier (gitignored, see below)
|
||||||
|
- `rpc-toolkit.md` - JSON-RPC patterns and handler configuration
|
||||||
|
- `core-rust-patterns.md` - Common utilities and patterns for Rust code in `/core` (guard pattern, mount guards, etc.)
|
||||||
|
- `s9pk-structure.md` - S9PK package format structure
|
||||||
|
- `i18n-patterns.md` - Internationalization key conventions and usage in `/core`
|
||||||
|
|
||||||
|
### Session Startup
|
||||||
|
|
||||||
|
On startup:
|
||||||
|
|
||||||
|
1. **Check for `agents/USER.md`** - If it doesn't exist, prompt the user for their name/identifier and create it. This file is gitignored since it varies per developer.
|
||||||
|
|
||||||
|
2. **Check `agents/TODO.md` for relevant tasks** - Show TODOs that either:
|
||||||
|
- Have no `@username` tag (relevant to everyone)
|
||||||
|
- Are tagged with the current user's identifier
|
||||||
|
|
||||||
|
Skip TODOs tagged with a different user.
|
||||||
|
|
||||||
|
3. **Ask "What would you like to do today?"** - Offer options for each relevant TODO item, plus "Something else" for other requests.
|
||||||
362
CONTRIBUTING.md
@@ -1,208 +1,200 @@
|
|||||||
<!-- omit in toc -->
|
# Contributing to StartOS
|
||||||
# Contributing to Embassy OS
|
|
||||||
|
|
||||||
First off, thanks for taking the time to contribute! ❤️
|
This guide is for contributing to StartOS. If you are interested in packaging a service for StartOS, visit the [service packaging guide](https://docs.start9.com/latest/packaging-guide/). If you are interested in promoting, providing technical support, creating tutorials, or helping in other ways, please visit the [Start9 website](https://start9.com/contribute).
|
||||||
|
|
||||||
All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉
|
## Collaboration
|
||||||
|
|
||||||
> And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about:
|
- [Matrix](https://matrix.to/#/#community-dev:matrix.start9labs.com)
|
||||||
> - Star the project
|
- [Telegram](https://t.me/start9_labs/47471)
|
||||||
> - Tweet about it
|
|
||||||
> - Refer this project in your project's readme
|
|
||||||
> - Mention the project at local meetups and tell your friends/colleagues
|
|
||||||
> - Buy an [Embassy](https://start9labs.com)
|
|
||||||
|
|
||||||
<!-- omit in toc -->
|
## Project Structure
|
||||||
## Table of Contents
|
|
||||||
|
|
||||||
- [I Have a Question](#i-have-a-question)
|
```bash
|
||||||
- [I Want To Contribute](#i-want-to-contribute)
|
/
|
||||||
- [Reporting Bugs](#reporting-bugs)
|
├── assets/ # Screenshots for README
|
||||||
- [Suggesting Enhancements](#suggesting-enhancements)
|
├── build/ # Auxiliary files and scripts for deployed images
|
||||||
- [Project Structure](#project-structure)
|
├── container-runtime/ # Node.js program managing package containers
|
||||||
- [Your First Code Contribution](#your-first-code-contribution)
|
├── core/ # Rust backend: API, daemon (startd), CLI (start-cli)
|
||||||
- [Setting Up Your Development Environment](#setting-up-your-development-environment)
|
├── debian/ # Debian package maintainer scripts
|
||||||
- [Building The Image](#building-the-image)
|
├── image-recipe/ # Scripts for building StartOS images
|
||||||
- [Improving The Documentation](#improving-the-documentation)
|
├── patch-db/ # (submodule) Diff-based data store for frontend sync
|
||||||
- [Styleguides](#styleguides)
|
├── sdk/ # TypeScript SDK for building StartOS packages
|
||||||
- [Formatting](#formatting)
|
└── web/ # Web UIs (Angular)
|
||||||
- [Atomic Commits](#atomic-commits)
|
|
||||||
- [Commit Messages](#commit-messages)
|
|
||||||
- [Pull Requests](#pull-requests)
|
|
||||||
- [Rebasing Changes](#rebasing-changes)
|
|
||||||
- [Join The Discussion](#join-the-discussion)
|
|
||||||
- [Join The Project Team](#join-the-project-team)
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
## I Have a Question
|
|
||||||
|
|
||||||
> If you want to ask a question, we assume that you have read the available [Documentation](https://docs.start9labs.com).
|
|
||||||
|
|
||||||
Before you ask a question, it is best to search for existing [Issues](https://github.com/Start9Labs/embassy-os/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first.
|
|
||||||
|
|
||||||
If you then still feel the need to ask a question and need clarification, we recommend the following:
|
|
||||||
|
|
||||||
- Open an [Issue](https://github.com/Start9Labs/embassy-os/issues/new).
|
|
||||||
- Provide as much context as you can about what you're running into.
|
|
||||||
- Provide project and platform versions, depending on what seems relevant.
|
|
||||||
|
|
||||||
We will then take care of the issue as soon as possible.
|
|
||||||
|
|
||||||
<!--
|
|
||||||
You might want to create a separate issue tag for questions and include it in this description. People should then tag their issues accordingly.
|
|
||||||
|
|
||||||
Depending on how large the project is, you may want to outsource the questioning, e.g. to Stack Overflow or Gitter. You may add additional contact and information possibilities:
|
|
||||||
- IRC
|
|
||||||
- Slack
|
|
||||||
- Gitter
|
|
||||||
- Stack Overflow tag
|
|
||||||
- Blog
|
|
||||||
- FAQ
|
|
||||||
- Roadmap
|
|
||||||
- E-Mail List
|
|
||||||
- Forum
|
|
||||||
-->
|
|
||||||
|
|
||||||
## I Want To Contribute
|
|
||||||
|
|
||||||
> ### Legal Notice <!-- omit in toc -->
|
|
||||||
> When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license.
|
|
||||||
|
|
||||||
### Reporting Bugs
|
|
||||||
|
|
||||||
<!-- omit in toc -->
|
|
||||||
#### Before Submitting a Bug Report
|
|
||||||
|
|
||||||
A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible.
|
|
||||||
|
|
||||||
- Make sure that you are using the latest version.
|
|
||||||
- Determine if your bug is really a bug and not an error on your side e.g. using incompatible environment components/versions (Make sure that you have read the [documentation](https://start9.com/latest/user-manual). If you are looking for support, you might want to check [this section](#i-have-a-question)).
|
|
||||||
- To see if other users have experienced (and potentially already solved) the same issue you are having, check whether a bug report already exists for your bug or error in the [bug tracker](https://github.com/Start9Labs/embassy-os/issues?q=label%3Abug).
|
|
||||||
- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue.
|
|
||||||
- Collect information about the bug:
|
|
||||||
- Stack trace (Traceback)
|
|
||||||
- Client OS, Platform and Version (Windows/Linux/macOS/iOS/Android, Firefox/Tor Browser/Consulate)
|
|
||||||
- Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant.
|
|
||||||
- Possibly your input and the output
|
|
||||||
- Can you reliably reproduce the issue? And can you also reproduce it with older versions?
|
|
||||||
|
|
||||||
<!-- omit in toc -->
|
|
||||||
#### How Do I Submit a Good Bug Report?
|
|
||||||
|
|
||||||
> You must never report security related issues, vulnerabilities or bugs to the issue tracker, or elsewhere in public. Instead sensitive bugs must be sent by email to <security@start9labs.com>.
|
|
||||||
<!-- You may add a PGP key to allow the messages to be sent encrypted as well. -->
|
|
||||||
|
|
||||||
We use GitHub issues to track bugs and errors. If you run into an issue with the project:
|
|
||||||
|
|
||||||
- Open an [Issue](https://github.com/Start9Labs/embassy-os/issues/new/choose) selecting the appropriate type.
|
|
||||||
- Explain the behavior you would expect and the actual behavior.
|
|
||||||
- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case.
|
|
||||||
- Provide the information you collected in the previous section.
|
|
||||||
|
|
||||||
Once it's filed:
|
|
||||||
|
|
||||||
- The project team will label the issue accordingly.
|
|
||||||
- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `Question`. Bugs with the `Question` tag will not be addressed until they are answered.
|
|
||||||
- If the team is able to reproduce the issue, it will be marked with a scoping level tag, as well as possibly other tags (such as `Security`), and the issue will be left to be [implemented by someone](#your-first-code-contribution).
|
|
||||||
|
|
||||||
<!-- You might want to create an issue template for bugs and errors that can be used as a guide and that defines the structure of the information to be included. If you do so, reference it here in the description. -->
|
|
||||||
|
|
||||||
|
|
||||||
### Suggesting Enhancements
|
|
||||||
|
|
||||||
This section guides you through submitting an enhancement suggestion for Embassy OS, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions.
|
|
||||||
|
|
||||||
<!-- omit in toc -->
|
|
||||||
#### Before Submitting an Enhancement
|
|
||||||
|
|
||||||
- Make sure that you are using the latest version.
|
|
||||||
- Read the [documentation](https://start9.com/latest/user-manual) carefully and find out if the functionality is already covered, maybe by an individual configuration.
|
|
||||||
- Perform a [search](https://github.com/Start9Labs/embassy-os/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one.
|
|
||||||
- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library.
|
|
||||||
|
|
||||||
<!-- omit in toc -->
|
|
||||||
#### How Do I Submit a Good Enhancement Suggestion?
|
|
||||||
|
|
||||||
Enhancement suggestions are tracked as [GitHub issues](https://github.com/Start9Labs/embassy-os/issues).
|
|
||||||
|
|
||||||
- Use a **clear and descriptive title** for the issue to identify the suggestion.
|
|
||||||
- Provide a **step-by-step description of the suggested enhancement** in as much detail as possible.
|
|
||||||
- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you.
|
|
||||||
- You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux. <!-- this should only be included if the project has a GUI -->
|
|
||||||
- **Explain why this enhancement would be useful** to most Embassy OS users. You may also want to point out the other projects that solved it better and which could serve as inspiration.
|
|
||||||
|
|
||||||
<!-- You might want to create an issue template for enhancement suggestions that can be used as a guide and that defines the structure of the information to be included. If you do so, reference it here in the description. -->
|
|
||||||
|
|
||||||
### Project Structure
|
|
||||||
|
|
||||||
embassyOS is composed of the following components. Please visit the README for each component to understand the dependency requirements and installation instructions.
|
|
||||||
- [`ui`](frontend/README.md) (Typescript Ionic Angular) is the code that is deployed to the browser to provide the user interface for embassyOS.
|
|
||||||
- [`backend`](backend/README.md) (Rust) is a command line utility, daemon, and software development kit that sets up and manages services and their environments, provides the interface for the ui, manages system state, and provides utilities for packaging services for embassyOS.
|
|
||||||
- `patch-db` - A diff based data store that is used to synchronize data between the front and backend.
|
|
||||||
- Notably, `patch-db` has a [client](https://github.com/Start9Labs/patch-db/tree/master/client) with its own dependency and installation requirements.
|
|
||||||
- `rpc-toolkit` - A library for generating an rpc server with cli bindings from Rust functions.
|
|
||||||
- `system-images` - (Docker, Rust) A suite of utility Docker images that are preloaded with embassyOS to assist with functions relating to services (eg. configuration, backups, health checks).
|
|
||||||
- [`setup-wizard`](frontend/README.md) - Code for the user interface that is displayed during the setup and recovery process for embassyOS.
|
|
||||||
- [`diagnostic-ui`](frontend/README.md) - Code for the user interface that is displayed when something has gone wrong with starting up embassyOS, which provides helpful debugging tools.
|
|
||||||
|
|
||||||
### Your First Code Contribution
|
|
||||||
|
|
||||||
#### Setting Up Your Development Environment
|
|
||||||
|
|
||||||
First, clone the embassyOS repository and from the project root, pull in the submodules for dependent libraries.
|
|
||||||
|
|
||||||
```sh
|
|
||||||
git clone https://github.com/Start9Labs/embassy-os.git
|
|
||||||
git submodule update --init --recursive
|
|
||||||
```
|
```
|
||||||
|
|
||||||
Depending on which component of the ecosystem you are interested in contributing to, follow the installation requirements listed in that component's README (linked [above](#project-structure))
|
See component READMEs for details:
|
||||||
|
- [`core`](core/README.md)
|
||||||
|
- [`web`](web/README.md)
|
||||||
|
- [`build`](build/README.md)
|
||||||
|
- [`patch-db`](https://github.com/Start9Labs/patch-db)
|
||||||
|
|
||||||
#### Building The Image
|
## Environment Setup
|
||||||
This step is for setting up an environment in which to test your code changes if you do not yet have an embassyOS.
|
|
||||||
|
|
||||||
- Requirements
|
```sh
|
||||||
- `ext4fs` (available if running on the Linux kernel)
|
git clone https://github.com/Start9Labs/start-os.git --recurse-submodules
|
||||||
- [Docker](https://docs.docker.com/get-docker/)
|
cd start-os
|
||||||
- GNU Make
|
```
|
||||||
- Building
|
|
||||||
- see setup instructions [here](build/README.md)
|
|
||||||
- run `make` from the project root
|
|
||||||
|
|
||||||
### Improving The Documentation
|
### Development Mode
|
||||||
You can find the repository for Start9's documentation [here](https://github.com/Start9Labs/documentation). If there is something you would like to see added, let us know, or create an issue yourself. Contributions are welcome for missing or incorrect information, broken links, requested additions, or general style improvements.
|
|
||||||
|
|
||||||
Contributions in the form of setup guides for integrations with external applications are highly encouraged. If you struggled through a process and would like to share your steps with others, check out the docs for each [service](https://github.com/Start9Labs/documentation/blob/master/source/user-manuals/available-services/index.rst) we support. The wrapper repos contain sections for adding integration guides, such as this [one](https://github.com/Start9Labs/bitcoind-wrapper/tree/master/docs). These not only help out others in the community, but inform how we can create a more seamless and intuitive experience.
|
For faster iteration during development:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
. ./devmode.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
This sets `ENVIRONMENT=dev` and `GIT_BRANCH_AS_HASH=1` to prevent rebuilds on every commit.
|
||||||
|
|
||||||
|
## Building
|
||||||
|
|
||||||
|
All builds can be performed on any operating system that can run Docker.
|
||||||
|
|
||||||
|
This project uses [GNU Make](https://www.gnu.org/software/make/) to build its components.
|
||||||
|
|
||||||
|
### Requirements
|
||||||
|
|
||||||
|
- [GNU Make](https://www.gnu.org/software/make/)
|
||||||
|
- [Docker](https://docs.docker.com/get-docker/) or [Podman](https://podman.io/)
|
||||||
|
- [NodeJS v20.16.0](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm)
|
||||||
|
- [Rust](https://rustup.rs/) (nightly for formatting)
|
||||||
|
- [sed](https://www.gnu.org/software/sed/), [grep](https://www.gnu.org/software/grep/), [awk](https://www.gnu.org/software/gawk/)
|
||||||
|
- [jq](https://jqlang.github.io/jq/)
|
||||||
|
- [gzip](https://www.gnu.org/software/gzip/), [brotli](https://github.com/google/brotli)
|
||||||
|
|
||||||
|
### Environment Variables
|
||||||
|
|
||||||
|
| Variable | Description |
|
||||||
|
|----------|-------------|
|
||||||
|
| `PLATFORM` | Target platform: `x86_64`, `x86_64-nonfree`, `aarch64`, `aarch64-nonfree`, `riscv64`, `raspberrypi` |
|
||||||
|
| `ENVIRONMENT` | Hyphen-separated feature flags (see below) |
|
||||||
|
| `PROFILE` | Build profile: `release` (default) or `dev` |
|
||||||
|
| `GIT_BRANCH_AS_HASH` | Set to `1` to use git branch name as version hash (avoids rebuilds) |
|
||||||
|
|
||||||
|
**ENVIRONMENT flags:**
|
||||||
|
- `dev` - Enables password SSH before setup, skips frontend compression
|
||||||
|
- `unstable` - Enables assertions and debugging with performance penalty
|
||||||
|
- `console` - Enables tokio-console for async debugging
|
||||||
|
|
||||||
|
**Platform notes:**
|
||||||
|
- `-nonfree` variants include proprietary firmware and drivers
|
||||||
|
- `raspberrypi` includes non-free components by necessity
|
||||||
|
- Platform is remembered between builds if not specified
|
||||||
|
|
||||||
|
### Make Targets
|
||||||
|
|
||||||
|
#### Building
|
||||||
|
|
||||||
|
| Target | Description |
|
||||||
|
|--------|-------------|
|
||||||
|
| `iso` | Create full `.iso` image (not for raspberrypi) |
|
||||||
|
| `img` | Create full `.img` image (raspberrypi only) |
|
||||||
|
| `deb` | Build Debian package |
|
||||||
|
| `all` | Build all Rust binaries |
|
||||||
|
| `uis` | Build all web UIs |
|
||||||
|
| `ui` | Build main UI only |
|
||||||
|
| `ts-bindings` | Generate TypeScript bindings from Rust types |
|
||||||
|
|
||||||
|
#### Deploying to Device
|
||||||
|
|
||||||
|
For devices on the same network:
|
||||||
|
|
||||||
|
| Target | Description |
|
||||||
|
|--------|-------------|
|
||||||
|
| `update-startbox REMOTE=start9@<ip>` | Deploy binary + UI only (fastest) |
|
||||||
|
| `update-deb REMOTE=start9@<ip>` | Deploy full Debian package |
|
||||||
|
| `update REMOTE=start9@<ip>` | OTA-style update |
|
||||||
|
| `reflash REMOTE=start9@<ip>` | Reflash as if using live ISO |
|
||||||
|
| `update-overlay REMOTE=start9@<ip>` | Deploy to in-memory overlay (reverts on reboot) |
|
||||||
|
|
||||||
|
For devices on different networks (uses [magic-wormhole](https://github.com/magic-wormhole/magic-wormhole)):
|
||||||
|
|
||||||
|
| Target | Description |
|
||||||
|
|--------|-------------|
|
||||||
|
| `wormhole` | Send startbox binary |
|
||||||
|
| `wormhole-deb` | Send Debian package |
|
||||||
|
| `wormhole-squashfs` | Send squashfs image |
|
||||||
|
|
||||||
|
#### Other
|
||||||
|
|
||||||
|
| Target | Description |
|
||||||
|
|--------|-------------|
|
||||||
|
| `format` | Run code formatting (Rust nightly required) |
|
||||||
|
| `test` | Run all automated tests |
|
||||||
|
| `test-core` | Run Rust tests |
|
||||||
|
| `test-sdk` | Run SDK tests |
|
||||||
|
| `test-container-runtime` | Run container runtime tests |
|
||||||
|
| `clean` | Delete all compiled artifacts |
|
||||||
|
|
||||||
|
## Testing
|
||||||
|
|
||||||
|
```bash
|
||||||
|
make test # All tests
|
||||||
|
make test-core # Rust tests (via ./core/run-tests.sh)
|
||||||
|
make test-sdk # SDK tests
|
||||||
|
make test-container-runtime # Container runtime tests
|
||||||
|
|
||||||
|
# Run specific Rust test
|
||||||
|
cd core && cargo test <test_name> --features=test
|
||||||
|
```
|
||||||
|
|
||||||
|
## Code Formatting
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Rust (requires nightly)
|
||||||
|
make format
|
||||||
|
|
||||||
|
# TypeScript/HTML/SCSS (web)
|
||||||
|
cd web && npm run format
|
||||||
|
```
|
||||||
|
|
||||||
|
## Code Style Guidelines
|
||||||
|
|
||||||
## Styleguides
|
|
||||||
### Formatting
|
### Formatting
|
||||||
Each component of embassyOS contains its own style guide. Code must be formatted with the formatter designated for each component. These are outlined within each component folder's README.
|
|
||||||
|
|
||||||
### Atomic Commits
|
Run the formatters before committing. Configuration is handled by `rustfmt.toml` (Rust) and prettier configs (TypeScript).
|
||||||
Commits [should be atomic](https://en.wikipedia.org/wiki/Atomic_commit#Atomic_commit_convention) and diffs should be easy to read.
|
|
||||||
Do not mix any formatting fixes or code moves with actual code changes.
|
### Documentation & Comments
|
||||||
|
|
||||||
|
**Rust:**
|
||||||
|
- Add doc comments (`///`) to public APIs, structs, and non-obvious functions
|
||||||
|
- Use `//` comments sparingly for complex logic that isn't self-evident
|
||||||
|
- Prefer self-documenting code (clear naming, small functions) over comments
|
||||||
|
|
||||||
|
**TypeScript:**
|
||||||
|
- Document exported functions and complex types with JSDoc
|
||||||
|
- Keep comments focused on "why" rather than "what"
|
||||||
|
|
||||||
|
**General:**
|
||||||
|
- Don't add comments that just restate the code
|
||||||
|
- Update or remove comments when code changes
|
||||||
|
- TODOs should include context: `// TODO(username): reason`
|
||||||
|
|
||||||
### Commit Messages
|
### Commit Messages
|
||||||
If a commit touches only 1 component, prefix the message with the affected component. i.e. `backend: update to tokio v0.3`.
|
|
||||||
|
|
||||||
### Pull Requests
|
Use [Conventional Commits](https://www.conventionalcommits.org/):
|
||||||
The body of a pull request should contain sufficient description of what the changes do, as well as a justification.
|
|
||||||
You should include references to any relevant [issues](https://github.com/Start9Labs/embassy-os/issues).
|
|
||||||
|
|
||||||
### Rebasing Changes
|
```
|
||||||
When a pull request conflicts with the target branch, you may be asked to rebase it on top of the current target branch. The `git rebase` command will take care of rebuilding your commits on top of the new base.
|
<type>(<scope>): <description>
|
||||||
|
|
||||||
This project aims to have a clean git history, where code changes are only made in non-merge commits. This simplifies auditability because merge commits can be assumed to not contain arbitrary code changes.
|
[optional body]
|
||||||
|
|
||||||
## Join The Discussion
|
[optional footer]
|
||||||
Current or aspiring contributors? Join our community developer [Matrix channel](https://matrix.to/#/#community-dev:matrix.start9labs.com).
|
```
|
||||||
|
|
||||||
Just interested in or using the project? Join our community [Telegram](https://t.me/start9_labs) or [Matrix](https://matrix.to/#/#community:matrix.start9labs.com).
|
**Types:**
|
||||||
|
- `feat` - New feature
|
||||||
|
- `fix` - Bug fix
|
||||||
|
- `docs` - Documentation only
|
||||||
|
- `style` - Formatting, no code change
|
||||||
|
- `refactor` - Code change that neither fixes a bug nor adds a feature
|
||||||
|
- `test` - Adding or updating tests
|
||||||
|
- `chore` - Build process, dependencies, etc.
|
||||||
|
|
||||||
## Join The Project Team
|
**Examples:**
|
||||||
Interested in becoming a part of the Start9 Labs team? Send an email to <jobs@start9labs.com>
|
```
|
||||||
|
feat(web): add dark mode toggle
|
||||||
|
fix(core): resolve race condition in service startup
|
||||||
|
docs: update CONTRIBUTING.md with style guidelines
|
||||||
|
refactor(sdk): simplify package validation logic
|
||||||
|
```
|
||||||
|
|
||||||
<!-- omit in toc -->
|
|
||||||
## Attribution
|
|
||||||
This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)!
|
|
||||||
|
|||||||
134
DEVELOPMENT.md
Normal file
@@ -0,0 +1,134 @@
|
|||||||
|
# Setting up your development environment on Debian/Ubuntu
|
||||||
|
|
||||||
|
A step-by-step guide
|
||||||
|
|
||||||
|
> This is the only officially supported build environment.
|
||||||
|
> MacOS has limited build capabilities and Windows requires [WSL2](https://learn.microsoft.com/en-us/windows/wsl/install)
|
||||||
|
|
||||||
|
## Installing dependencies
|
||||||
|
|
||||||
|
Run the following commands one at a time
|
||||||
|
|
||||||
|
```sh
|
||||||
|
sudo apt update
|
||||||
|
sudo apt install -y ca-certificates curl gpg build-essential
|
||||||
|
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
|
||||||
|
echo "deb [arch=$(dpkg-architecture -q DEB_HOST_ARCH) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian bookworm stable" | sudo tee /etc/apt/sources.list.d/docker.list
|
||||||
|
sudo apt update
|
||||||
|
sudo apt install -y sed grep gawk jq gzip brotli containerd.io docker-ce docker-ce-cli docker-compose-plugin qemu-user-static binfmt-support squashfs-tools git debspawn rsync b3sum
|
||||||
|
sudo mkdir -p /etc/debspawn/
|
||||||
|
echo "AllowUnsafePermissions=true" | sudo tee /etc/debspawn/global.toml
|
||||||
|
sudo usermod -aG docker $USER
|
||||||
|
sudo su $USER
|
||||||
|
docker run --privileged --rm tonistiigi/binfmt --install all
|
||||||
|
docker buildx create --use
|
||||||
|
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh # proceed with default installation
|
||||||
|
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/master/install.sh | bash
|
||||||
|
source ~/.bashrc
|
||||||
|
nvm install 24
|
||||||
|
nvm use 24
|
||||||
|
nvm alias default 24 # this prevents your machine from reverting back to another version
|
||||||
|
```
|
||||||
|
|
||||||
|
## Cloning the repository
|
||||||
|
|
||||||
|
```sh
|
||||||
|
git clone --recursive https://github.com/Start9Labs/start-os.git --branch next/major
|
||||||
|
cd start-os
|
||||||
|
```
|
||||||
|
|
||||||
|
## Building an ISO
|
||||||
|
|
||||||
|
```sh
|
||||||
|
PLATFORM=$(uname -m) ENVIRONMENT=dev make iso
|
||||||
|
```
|
||||||
|
|
||||||
|
This will build an ISO for your current architecture. If you are building to run on an architecture other than the one you are currently on, replace `$(uname -m)` with the correct platform for the device (one of `aarch64`, `aarch64-nonfree`, `x86_64`, `x86_64-nonfree`, `raspberrypi`)
|
||||||
|
|
||||||
|
## Creating a VM
|
||||||
|
|
||||||
|
### Install virt-manager
|
||||||
|
|
||||||
|
```sh
|
||||||
|
sudo apt update
|
||||||
|
sudo apt install -y virt-manager
|
||||||
|
sudo usermod -aG libvirt $USER
|
||||||
|
sudo su $USER
|
||||||
|
```
|
||||||
|
|
||||||
|
### Launch virt-manager
|
||||||
|
|
||||||
|
```sh
|
||||||
|
virt-manager
|
||||||
|
```
|
||||||
|
|
||||||
|
### Create new virtual machine
|
||||||
|
|
||||||
|

|
||||||
|

|
||||||
|

|
||||||
|

|
||||||
|
|
||||||
|
#### make sure to set "Target Path" to the path to your results directory in start-os
|
||||||
|
|
||||||
|

|
||||||
|

|
||||||
|

|
||||||
|

|
||||||
|

|
||||||
|

|
||||||
|

|
||||||
|

|
||||||
|
|
||||||
|
## Updating a VM
|
||||||
|
|
||||||
|
The fastest way to update a VM to your latest code depends on what you changed:
|
||||||
|
|
||||||
|
### UI or startd:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
PLATFORM=$(uname -m) ENVIRONMENT=dev make update-startbox REMOTE=start9@<VM IP>
|
||||||
|
```
|
||||||
|
|
||||||
|
### Container runtime or debian dependencies:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
PLATFORM=$(uname -m) ENVIRONMENT=dev make update-deb REMOTE=start9@<VM IP>
|
||||||
|
```
|
||||||
|
|
||||||
|
### Image recipe:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
PLATFORM=$(uname -m) ENVIRONMENT=dev make update-squashfs REMOTE=start9@<VM IP>
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
If the device you are building for is not available via ssh, it is also possible to use `magic-wormhole` to send the relevant files.
|
||||||
|
|
||||||
|
### Prerequisites:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
sudo apt update
|
||||||
|
sudo apt install -y magic-wormhole
|
||||||
|
```
|
||||||
|
|
||||||
|
As before, the fastest way to update a VM to your latest code depends on what you changed. Each of the following commands will return a command to paste into the shell of the device you would like to upgrade.
|
||||||
|
|
||||||
|
### UI or startd:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
PLATFORM=$(uname -m) ENVIRONMENT=dev make wormhole
|
||||||
|
```
|
||||||
|
|
||||||
|
### Container runtime or debian dependencies:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
PLATFORM=$(uname -m) ENVIRONMENT=dev make wormhole-deb
|
||||||
|
```
|
||||||
|
|
||||||
|
### Image recipe:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
PLATFORM=$(uname -m) ENVIRONMENT=dev make wormhole-squashfs
|
||||||
|
```
|
||||||
21
LICENSE
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
MIT License
|
||||||
|
|
||||||
|
Copyright (c) 2023 Start9 Labs, Inc.
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
42
LICENSE.md
@@ -1,42 +0,0 @@
|
|||||||
# START9 NON-COMMERCIAL LICENSE v1
|
|
||||||
Version 1, 22 September 2022
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
### 1. Definitions
|
|
||||||
|
|
||||||
"License" means version 1 of the Start9 Non-Commercial License.
|
|
||||||
|
|
||||||
"Licensor" means the Start9 Labs, Inc, or its successor(s) in interest, or a future assignee of the copyright.
|
|
||||||
|
|
||||||
"You" (or "Your") means an individual or organization exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source Code" for a work means the preferred form of the work for making modifications to it.
|
|
||||||
|
|
||||||
"Object Code" means any non-source form of a work, including the machine-language output by a compiler or assembler.
|
|
||||||
|
|
||||||
"Work" means any work of authorship, whether in Source or Object form, made available under this License.
|
|
||||||
|
|
||||||
"Derivative Work" means any work, whether in Source or Object form, that is based on (or derived from) the Work.
|
|
||||||
|
|
||||||
"Distribute" means to convey or to publish and generally has the same meaning here as under U.S. Copyright law.
|
|
||||||
|
|
||||||
"Sell" means practicing any or all of the rights granted to you under the License to provide to third parties, for a fee or other consideration (including, without limitation, fees for hosting, consulting, or support services), a product or service whose value derives, entirely or substantially, from the functionality of the Work or Derivative Work.
|
|
||||||
|
|
||||||
### 2. Grant of Rights
|
|
||||||
|
|
||||||
Subject to the terms of this license, the Licensor grants you, the licensee, a non-exclusive, worldwide, royalty-free copyright license to access, audit, copy, modify, compile, run, test, distribute, or otherwise use the Software.
|
|
||||||
|
|
||||||
### 3. Limitations
|
|
||||||
|
|
||||||
1. The grant of rights under the License does NOT include, and the License does NOT grant You the right to Sell the Work or Derivative Work.
|
|
||||||
2. If you Distribute the Work or Derivative Work, you expressly undertake not to remove or modify, in any manner, the copyright notices attached to the Work or displayed in any output of the Work when run, and to reproduce these notices, in an identical manner, in any distributed copies of the Work or Derivative Work together with a copy of this License.
|
|
||||||
3. If you Distribute a Derivative Work, it must carry prominent notices stating that it has been modified from the Work, providing a relevant date.
|
|
||||||
|
|
||||||
### 4. Contributions
|
|
||||||
|
|
||||||
You hereby grant to Licensor a perpetual, irrevocable, worldwide, non-exclusive, royalty-free license to use and exploit any Derivative Work of which you are the author.
|
|
||||||
|
|
||||||
### 5. Disclaimer
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. LICENSOR HAS NO OBLIGATION TO SUPPORT RECIPIENTS OF THE SOFTWARE.
|
|
||||||
427
Makefile
@@ -1,122 +1,381 @@
|
|||||||
ARCH = aarch64
|
ls-files = $(shell git ls-files --cached --others --exclude-standard $1)
|
||||||
ENVIRONMENT_FILE := $(shell ./check-environment.sh)
|
PROFILE = release
|
||||||
GIT_HASH_FILE := $(shell ./check-git-hash.sh)
|
|
||||||
EMBASSY_BINS := backend/target/$(ARCH)-unknown-linux-gnu/release/embassyd backend/target/$(ARCH)-unknown-linux-gnu/release/embassy-init backend/target/$(ARCH)-unknown-linux-gnu/release/embassy-cli backend/target/$(ARCH)-unknown-linux-gnu/release/embassy-sdk backend/target/$(ARCH)-unknown-linux-gnu/release/avahi-alias
|
PLATFORM_FILE := $(shell ./build/env/check-platform.sh)
|
||||||
EMBASSY_UIS := frontend/dist/ui frontend/dist/setup-wizard frontend/dist/diagnostic-ui
|
ENVIRONMENT_FILE := $(shell ./build/env/check-environment.sh)
|
||||||
EMBASSY_SRC := raspios.img product_key.txt $(EMBASSY_BINS) backend/embassyd.service backend/embassy-init.service $(EMBASSY_UIS) $(shell find build)
|
GIT_HASH_FILE := $(shell ./build/env/check-git-hash.sh)
|
||||||
COMPAT_SRC := $(shell find system-images/compat/ -not -path 'system-images/compat/target/*' -and -not -name compat.tar -and -not -name target)
|
VERSION_FILE := $(shell ./build/env/check-version.sh)
|
||||||
UTILS_SRC := $(shell find system-images/utils/ -not -name utils.tar)
|
BASENAME := $(shell PROJECT=startos ./build/env/basename.sh)
|
||||||
BINFMT_SRC := $(shell find system-images/binfmt/ -not -name binfmt.tar)
|
PLATFORM := $(shell if [ -f $(PLATFORM_FILE) ]; then cat $(PLATFORM_FILE); else echo unknown; fi)
|
||||||
BACKEND_SRC := $(shell find backend/src) $(shell find backend/migrations) $(shell find patch-db/*/src) backend/Cargo.toml backend/Cargo.lock
|
ARCH := $(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then echo aarch64; else echo $(PLATFORM) | sed 's/-nonfree$$//g'; fi)
|
||||||
FRONTEND_SHARED_SRC := $(shell find frontend/projects/shared) $(shell find frontend/assets) $(shell ls -p frontend/ | grep -v / | sed 's/^/frontend\//g') frontend/node_modules frontend/config.json patch-db/client/dist frontend/patchdb-ui-seed.json
|
RUST_ARCH := $(shell if [ "$(ARCH)" = "riscv64" ]; then echo riscv64gc; else echo $(ARCH); fi)
|
||||||
FRONTEND_UI_SRC := $(shell find frontend/projects/ui)
|
REGISTRY_BASENAME := $(shell PROJECT=start-registry PLATFORM=$(ARCH) ./build/env/basename.sh)
|
||||||
FRONTEND_SETUP_WIZARD_SRC := $(shell find frontend/projects/setup-wizard)
|
TUNNEL_BASENAME := $(shell PROJECT=start-tunnel PLATFORM=$(ARCH) ./build/env/basename.sh)
|
||||||
FRONTEND_DIAGNOSTIC_UI_SRC := $(shell find frontend/projects/diagnostic-ui)
|
IMAGE_TYPE=$(shell if [ "$(PLATFORM)" = raspberrypi ]; then echo img; else echo iso; fi)
|
||||||
PATCH_DB_CLIENT_SRC := $(shell find patch-db/client -not -path patch-db/client/dist)
|
WEB_UIS := web/dist/raw/ui/index.html web/dist/raw/setup-wizard/index.html
|
||||||
|
COMPRESSED_WEB_UIS := web/dist/static/ui/index.html web/dist/static/setup-wizard/index.html
|
||||||
|
FIRMWARE_ROMS := build/lib/firmware/$(PLATFORM) $(shell jq --raw-output '.[] | select(.platform[] | contains("$(PLATFORM)")) | "./build/lib/firmware/$(PLATFORM)/" + .id + ".rom.gz"' build/lib/firmware.json)
|
||||||
|
BUILD_SRC := $(call ls-files, build/lib) build/lib/depends build/lib/conflicts $(FIRMWARE_ROMS)
|
||||||
|
IMAGE_RECIPE_SRC := $(call ls-files, build/image-recipe/)
|
||||||
|
STARTD_SRC := core/startd.service $(BUILD_SRC)
|
||||||
|
CORE_SRC := $(call ls-files, core) $(shell git ls-files --recurse-submodules patch-db) $(GIT_HASH_FILE)
|
||||||
|
WEB_SHARED_SRC := $(call ls-files, web/projects/shared) $(call ls-files, web/projects/marketplace) $(shell ls -p web/ | grep -v / | sed 's/^/web\//g') web/node_modules/.package-lock.json web/config.json patch-db/client/dist/index.js sdk/baseDist/package.json web/patchdb-ui-seed.json sdk/dist/package.json
|
||||||
|
WEB_UI_SRC := $(call ls-files, web/projects/ui)
|
||||||
|
WEB_SETUP_WIZARD_SRC := $(call ls-files, web/projects/setup-wizard)
|
||||||
|
WEB_START_TUNNEL_SRC := $(call ls-files, web/projects/start-tunnel)
|
||||||
|
PATCH_DB_CLIENT_SRC := $(shell git ls-files --recurse-submodules patch-db/client)
|
||||||
GZIP_BIN := $(shell which pigz || which gzip)
|
GZIP_BIN := $(shell which pigz || which gzip)
|
||||||
$(shell sudo true)
|
TAR_BIN := $(shell which gtar || which tar)
|
||||||
|
COMPILED_TARGETS := core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox core/target/$(RUST_ARCH)-unknown-linux-musl/release/start-container container-runtime/rootfs.$(ARCH).squashfs
|
||||||
|
STARTOS_TARGETS := $(STARTD_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE) $(COMPILED_TARGETS) target/$(RUST_ARCH)-unknown-linux-musl/release/startos-backup-fs $(PLATFORM_FILE) \
|
||||||
|
$(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then \
|
||||||
|
echo target/aarch64-unknown-linux-musl/release/pi-beep; \
|
||||||
|
fi) \
|
||||||
|
$(shell /bin/bash -c 'if [[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]; then \
|
||||||
|
echo target/$(RUST_ARCH)-unknown-linux-musl/release/flamegraph; \
|
||||||
|
fi') \
|
||||||
|
$(shell /bin/bash -c 'if [[ "${ENVIRONMENT}" =~ (^|-)console($$|-) ]]; then \
|
||||||
|
echo target/$(RUST_ARCH)-unknown-linux-musl/release/tokio-console; \
|
||||||
|
fi')
|
||||||
|
REGISTRY_TARGETS := core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/registrybox core/start-registryd.service
|
||||||
|
TUNNEL_TARGETS := core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/tunnelbox core/start-tunneld.service
|
||||||
|
|
||||||
|
ifeq ($(REMOTE),)
|
||||||
|
mkdir = mkdir -p $1
|
||||||
|
rm = rm -rf $1
|
||||||
|
cp = cp -r $1 $2
|
||||||
|
ln = ln -sf $1 $2
|
||||||
|
else
|
||||||
|
ifeq ($(SSHPASS),)
|
||||||
|
ssh = ssh $(REMOTE) $1
|
||||||
|
else
|
||||||
|
ssh = sshpass -p $(SSHPASS) ssh $(REMOTE) $1
|
||||||
|
endif
|
||||||
|
mkdir = $(call ssh,'sudo mkdir -p $1')
|
||||||
|
rm = $(call ssh,'sudo rm -rf $1')
|
||||||
|
ln = $(call ssh,'sudo ln -sf $1 $2')
|
||||||
|
define cp
|
||||||
|
$(TAR_BIN) --transform "s|^$1|x|" -czv -f- $1 | $(call ssh,"sudo tar --transform 's|^x|$2|' -xzv -f- -C /")
|
||||||
|
endef
|
||||||
|
endif
|
||||||
|
|
||||||
.DELETE_ON_ERROR:
|
.DELETE_ON_ERROR:
|
||||||
|
|
||||||
.PHONY: all gzip clean format sdk snapshots frontends ui backend
|
.PHONY: all metadata install clean format install-cli cli uis ui reflash deb $(IMAGE_TYPE) squashfs wormhole wormhole-deb test test-core test-sdk test-container-runtime registry install-registry tunnel install-tunnel ts-bindings
|
||||||
all: eos.img
|
|
||||||
|
|
||||||
gzip: eos.tar.gz
|
all: $(STARTOS_TARGETS)
|
||||||
|
|
||||||
eos.tar.gz: eos.img
|
touch:
|
||||||
tar --format=posix -cS -f- eos.img | $(GZIP_BIN) > eos.tar.gz
|
touch $(STARTOS_TARGETS)
|
||||||
|
|
||||||
|
metadata: $(VERSION_FILE) $(PLATFORM_FILE) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE)
|
||||||
|
|
||||||
clean:
|
clean:
|
||||||
rm -f eos.img
|
rm -rf core/target
|
||||||
rm -f ubuntu.img
|
rm -rf core/bindings
|
||||||
rm -f product_key.txt
|
rm -rf web/.angular
|
||||||
rm -f system-images/**/*.tar
|
rm -f web/config.json
|
||||||
sudo rm -f $(EMBASSY_BINS)
|
rm -rf web/node_modules
|
||||||
rm -f frontend/config.json
|
rm -rf web/dist
|
||||||
rm -rf frontend/node_modules
|
|
||||||
rm -rf frontend/dist
|
|
||||||
rm -rf patch-db/client/node_modules
|
rm -rf patch-db/client/node_modules
|
||||||
rm -rf patch-db/client/dist
|
rm -rf patch-db/client/dist
|
||||||
sudo rm -rf cargo-deps
|
rm -rf patch-db/target
|
||||||
|
rm -rf target
|
||||||
|
rm -rf dpkg-workdir
|
||||||
|
rm -rf image-recipe/deb
|
||||||
|
rm -rf results
|
||||||
|
rm -rf build/lib/firmware
|
||||||
|
rm -rf container-runtime/dist
|
||||||
|
rm -rf container-runtime/node_modules
|
||||||
|
rm -f container-runtime/*.squashfs
|
||||||
|
(cd sdk && make clean)
|
||||||
|
rm -f env/*.txt
|
||||||
|
|
||||||
format:
|
format:
|
||||||
cd backend && cargo +nightly fmt
|
cd core && cargo +nightly fmt
|
||||||
cd libs && cargo +nightly fmt
|
|
||||||
|
|
||||||
sdk:
|
test: | test-core test-sdk test-container-runtime
|
||||||
cd backend/ && ./install-sdk.sh
|
|
||||||
|
|
||||||
eos.img: $(EMBASSY_SRC) system-images/compat/compat.tar system-images/utils/utils.tar system-images/binfmt/binfmt.tar cargo-deps/aarch64-unknown-linux-gnu/release/nc-broadcast $(ENVIRONMENT_FILE) $(GIT_HASH_FILE)
|
test-core: $(CORE_SRC) $(ENVIRONMENT_FILE)
|
||||||
! test -f eos.img || rm eos.img
|
./core/run-tests.sh
|
||||||
if [ "$(NO_KEY)" = "1" ]; then NO_KEY=1 ./build/make-image.sh; else ./build/make-image.sh; fi
|
|
||||||
|
|
||||||
system-images/compat/compat.tar: $(COMPAT_SRC)
|
test-sdk: $(call ls-files, sdk) sdk/base/lib/osBindings/index.ts
|
||||||
cd system-images/compat && make
|
cd sdk && make test
|
||||||
|
|
||||||
system-images/utils/utils.tar: $(UTILS_SRC)
|
test-container-runtime: container-runtime/node_modules/.package-lock.json $(call ls-files, container-runtime/src) container-runtime/package.json container-runtime/tsconfig.json
|
||||||
cd system-images/utils && make
|
cd container-runtime && npm test
|
||||||
|
|
||||||
system-images/binfmt/binfmt.tar: $(BINFMT_SRC)
|
install-cli: $(GIT_HASH_FILE)
|
||||||
cd system-images/binfmt && make
|
./core/build/build-cli.sh --install
|
||||||
|
|
||||||
raspios.img:
|
cli: $(GIT_HASH_FILE)
|
||||||
wget --continue https://downloads.raspberrypi.org/raspios_lite_arm64/images/raspios_lite_arm64-2022-01-28/2022-01-28-raspios-bullseye-arm64-lite.zip
|
./core/build/build-cli.sh
|
||||||
unzip 2022-01-28-raspios-bullseye-arm64-lite.zip
|
|
||||||
mv 2022-01-28-raspios-bullseye-arm64-lite.img raspios.img
|
|
||||||
|
|
||||||
product_key.txt:
|
registry: core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/registrybox
|
||||||
$(shell which echo) -n "X" > product_key.txt
|
|
||||||
cat /dev/urandom | base32 | head -c11 | tr '[:upper:]' '[:lower:]' >> product_key.txt
|
|
||||||
if [ "$(KEY)" != "" ]; then $(shell which echo) -n "$(KEY)" > product_key.txt; fi
|
|
||||||
echo >> product_key.txt
|
|
||||||
|
|
||||||
snapshots: libs/snapshot-creator/Cargo.toml
|
install-registry: $(REGISTRY_TARGETS)
|
||||||
cd libs/ && ./build-v8-snapshot.sh
|
$(call mkdir,$(DESTDIR)/usr/bin)
|
||||||
cd libs/ && ./build-arm-v8-snapshot.sh
|
$(call cp,core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/registrybox,$(DESTDIR)/usr/bin/start-registrybox)
|
||||||
|
$(call ln,/usr/bin/start-registrybox,$(DESTDIR)/usr/bin/start-registryd)
|
||||||
|
$(call ln,/usr/bin/start-registrybox,$(DESTDIR)/usr/bin/start-registry)
|
||||||
|
|
||||||
$(EMBASSY_BINS): $(BACKEND_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) frontend/patchdb-ui-seed.json
|
$(call mkdir,$(DESTDIR)/lib/systemd/system)
|
||||||
cd backend && ./build-prod.sh
|
$(call cp,core/start-registryd.service,$(DESTDIR)/lib/systemd/system/start-registryd.service)
|
||||||
touch $(EMBASSY_BINS)
|
|
||||||
|
|
||||||
frontend/node_modules: frontend/package.json
|
core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/registrybox: $(CORE_SRC) $(ENVIRONMENT_FILE)
|
||||||
npm --prefix frontend ci
|
ARCH=$(ARCH) PROFILE=$(PROFILE) ./core/build/build-registrybox.sh
|
||||||
|
|
||||||
frontend/dist/ui: $(FRONTEND_UI_SRC) $(FRONTEND_SHARED_SRC) $(ENVIRONMENT_FILE)
|
tunnel: core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/tunnelbox
|
||||||
npm --prefix frontend run build:ui
|
|
||||||
|
|
||||||
frontend/dist/setup-wizard: $(FRONTEND_SETUP_WIZARD_SRC) $(FRONTEND_SHARED_SRC) $(ENVIRONMENT_FILE)
|
install-tunnel: core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/tunnelbox core/start-tunneld.service
|
||||||
npm --prefix frontend run build:setup
|
$(call mkdir,$(DESTDIR)/usr/bin)
|
||||||
|
$(call cp,core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/tunnelbox,$(DESTDIR)/usr/bin/start-tunnelbox)
|
||||||
|
$(call ln,/usr/bin/start-tunnelbox,$(DESTDIR)/usr/bin/start-tunneld)
|
||||||
|
$(call ln,/usr/bin/start-tunnelbox,$(DESTDIR)/usr/bin/start-tunnel)
|
||||||
|
|
||||||
frontend/dist/diagnostic-ui: $(FRONTEND_DIAGNOSTIC_UI_SRC) $(FRONTEND_SHARED_SRC) $(ENVIRONMENT_FILE)
|
$(call mkdir,$(DESTDIR)/lib/systemd/system)
|
||||||
npm --prefix frontend run build:dui
|
$(call cp,core/start-tunneld.service,$(DESTDIR)/lib/systemd/system/start-tunneld.service)
|
||||||
|
|
||||||
frontend/config.json: $(GIT_HASH_FILE) frontend/config-sample.json
|
$(call mkdir,$(DESTDIR)/usr/lib/startos/scripts)
|
||||||
jq '.useMocks = false' frontend/config-sample.json > frontend/config.json
|
$(call cp,build/lib/scripts/forward-port,$(DESTDIR)/usr/lib/startos/scripts/forward-port)
|
||||||
npm --prefix frontend run-script build-config
|
|
||||||
|
|
||||||
frontend/patchdb-ui-seed.json: frontend/package.json
|
core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/tunnelbox: $(CORE_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) web/dist/static/start-tunnel/index.html
|
||||||
jq '."ack-welcome" = "$(shell yq '.version' frontend/package.json)"' frontend/patchdb-ui-seed.json > ui-seed.tmp
|
ARCH=$(ARCH) PROFILE=$(PROFILE) ./core/build/build-tunnelbox.sh
|
||||||
mv ui-seed.tmp frontend/patchdb-ui-seed.json
|
|
||||||
|
|
||||||
patch-db/client/node_modules: patch-db/client/package.json
|
deb: results/$(BASENAME).deb
|
||||||
|
|
||||||
|
results/$(BASENAME).deb: debian/dpkg-build.sh $(call ls-files,debian/startos) $(STARTOS_TARGETS)
|
||||||
|
PLATFORM=$(PLATFORM) REQUIRES=debian ./build/os-compat/run-compat.sh ./debian/dpkg-build.sh
|
||||||
|
|
||||||
|
registry-deb: results/$(REGISTRY_BASENAME).deb
|
||||||
|
|
||||||
|
results/$(REGISTRY_BASENAME).deb: debian/dpkg-build.sh $(call ls-files,debian/start-registry) $(REGISTRY_TARGETS)
|
||||||
|
PROJECT=start-registry PLATFORM=$(ARCH) REQUIRES=debian ./build/os-compat/run-compat.sh ./debian/dpkg-build.sh
|
||||||
|
|
||||||
|
tunnel-deb: results/$(TUNNEL_BASENAME).deb
|
||||||
|
|
||||||
|
results/$(TUNNEL_BASENAME).deb: debian/dpkg-build.sh $(call ls-files,debian/start-tunnel) $(TUNNEL_TARGETS) build/lib/scripts/forward-port
|
||||||
|
PROJECT=start-tunnel PLATFORM=$(ARCH) REQUIRES=debian DEPENDS=wireguard-tools,iptables,conntrack ./build/os-compat/run-compat.sh ./debian/dpkg-build.sh
|
||||||
|
|
||||||
|
$(IMAGE_TYPE): results/$(BASENAME).$(IMAGE_TYPE)
|
||||||
|
|
||||||
|
squashfs: results/$(BASENAME).squashfs
|
||||||
|
|
||||||
|
results/$(BASENAME).$(IMAGE_TYPE) results/$(BASENAME).squashfs: $(IMAGE_RECIPE_SRC) results/$(BASENAME).deb
|
||||||
|
ARCH=$(ARCH) ./build/image-recipe/run-local-build.sh "results/$(BASENAME).deb"
|
||||||
|
|
||||||
|
# For creating os images. DO NOT USE
|
||||||
|
install: $(STARTOS_TARGETS)
|
||||||
|
$(call mkdir,$(DESTDIR)/usr/bin)
|
||||||
|
$(call mkdir,$(DESTDIR)/usr/sbin)
|
||||||
|
$(call cp,core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox,$(DESTDIR)/usr/bin/startbox)
|
||||||
|
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/startd)
|
||||||
|
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/start-cli)
|
||||||
|
if [ "$(PLATFORM)" = "raspberrypi" ]; then $(call cp,target/aarch64-unknown-linux-musl/release/pi-beep,$(DESTDIR)/usr/bin/pi-beep); fi
|
||||||
|
if /bin/bash -c '[[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]'; then \
|
||||||
|
$(call cp,target/$(RUST_ARCH)-unknown-linux-musl/release/flamegraph,$(DESTDIR)/usr/bin/flamegraph); \
|
||||||
|
fi
|
||||||
|
if /bin/bash -c '[[ "${ENVIRONMENT}" =~ (^|-)console($$|-) ]]'; then \
|
||||||
|
$(call cp,target/$(RUST_ARCH)-unknown-linux-musl/release/tokio-console,$(DESTDIR)/usr/bin/tokio-console); \
|
||||||
|
fi
|
||||||
|
$(call cp,target/$(RUST_ARCH)-unknown-linux-musl/release/startos-backup-fs,$(DESTDIR)/usr/bin/startos-backup-fs)
|
||||||
|
$(call ln,/usr/bin/startos-backup-fs,$(DESTDIR)/usr/sbin/mount.backup-fs)
|
||||||
|
|
||||||
|
$(call mkdir,$(DESTDIR)/lib/systemd/system)
|
||||||
|
$(call cp,core/startd.service,$(DESTDIR)/lib/systemd/system/startd.service)
|
||||||
|
|
||||||
|
$(call mkdir,$(DESTDIR)/usr/lib)
|
||||||
|
$(call rm,$(DESTDIR)/usr/lib/startos)
|
||||||
|
$(call cp,build/lib,$(DESTDIR)/usr/lib/startos)
|
||||||
|
$(call mkdir,$(DESTDIR)/usr/lib/startos/container-runtime)
|
||||||
|
$(call cp,container-runtime/rootfs.$(ARCH).squashfs,$(DESTDIR)/usr/lib/startos/container-runtime/rootfs.squashfs)
|
||||||
|
|
||||||
|
$(call cp,build/env/PLATFORM.txt,$(DESTDIR)/usr/lib/startos/PLATFORM.txt)
|
||||||
|
$(call cp,build/env/ENVIRONMENT.txt,$(DESTDIR)/usr/lib/startos/ENVIRONMENT.txt)
|
||||||
|
$(call cp,build/env/GIT_HASH.txt,$(DESTDIR)/usr/lib/startos/GIT_HASH.txt)
|
||||||
|
$(call cp,build/env/VERSION.txt,$(DESTDIR)/usr/lib/startos/VERSION.txt)
|
||||||
|
|
||||||
|
update-overlay: $(STARTOS_TARGETS)
|
||||||
|
@echo "\033[33m!!! THIS WILL ONLY REFLASH YOUR DEVICE IN MEMORY !!!\033[0m"
|
||||||
|
@echo "\033[33mALL CHANGES WILL BE REVERTED IF YOU RESTART THE DEVICE\033[0m"
|
||||||
|
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
|
||||||
|
@if [ "`ssh $(REMOTE) 'cat /usr/lib/startos/VERSION.txt'`" != "`cat $(VERSION_FILE)`" ]; then >&2 echo "StartOS requires migrations: update-overlay is unavailable." && false; fi
|
||||||
|
$(call ssh,"sudo systemctl stop startd")
|
||||||
|
$(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) PLATFORM=$(PLATFORM)
|
||||||
|
$(call ssh,"sudo systemctl start startd")
|
||||||
|
|
||||||
|
wormhole: core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox
|
||||||
|
@echo "Paste the following command into the shell of your StartOS server:"
|
||||||
|
@echo
|
||||||
|
@wormhole send core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox 2>&1 | awk -Winteractive '/wormhole receive/ { printf "sudo /usr/lib/startos/scripts/chroot-and-upgrade \"cd /usr/bin && rm startbox && wormhole receive --accept-file %s && chmod +x startbox\"\n", $$3 }'
|
||||||
|
|
||||||
|
wormhole-deb: results/$(BASENAME).deb
|
||||||
|
@echo "Paste the following command into the shell of your StartOS server:"
|
||||||
|
@echo
|
||||||
|
@wormhole send results/$(BASENAME).deb 2>&1 | awk -Winteractive '/wormhole receive/ { printf "sudo /usr/lib/startos/scripts/chroot-and-upgrade '"'"'cd $$(mktemp -d) && wormhole receive --accept-file %s && apt-get install -y --reinstall ./$(BASENAME).deb'"'"'\n", $$3 }'
|
||||||
|
|
||||||
|
wormhole-squashfs: results/$(BASENAME).squashfs
|
||||||
|
$(eval SQFS_SUM := $(shell b3sum results/$(BASENAME).squashfs | head -c 32))
|
||||||
|
$(eval SQFS_SIZE := $(shell du -s --bytes results/$(BASENAME).squashfs | awk '{print $$1}'))
|
||||||
|
@echo "Paste the following command into the shell of your StartOS server:"
|
||||||
|
@echo
|
||||||
|
@wormhole send results/$(BASENAME).squashfs 2>&1 | awk -Winteractive '/wormhole receive/ { printf "sudo sh -c '"'"'/usr/lib/startos/scripts/prune-images $(SQFS_SIZE) && /usr/lib/startos/scripts/prune-boot && cd /media/startos/images && wormhole receive --accept-file %s && CHECKSUM=$(SQFS_SUM) /usr/lib/startos/scripts/upgrade ./$(BASENAME).squashfs'"'"'\n", $$3 }'
|
||||||
|
|
||||||
|
update: $(STARTOS_TARGETS)
|
||||||
|
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
|
||||||
|
$(call ssh,'sudo /usr/lib/startos/scripts/chroot-and-upgrade --create')
|
||||||
|
$(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) DESTDIR=/media/startos/next PLATFORM=$(PLATFORM)
|
||||||
|
$(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync "apt-get install -y $(shell cat ./build/lib/depends)"')
|
||||||
|
|
||||||
|
update-startbox: core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox # only update binary (faster than full update)
|
||||||
|
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
|
||||||
|
$(call ssh,'sudo /usr/lib/startos/scripts/chroot-and-upgrade --create')
|
||||||
|
$(call cp,core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox,/media/startos/next/usr/bin/startbox)
|
||||||
|
$(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync true')
|
||||||
|
|
||||||
|
update-deb: results/$(BASENAME).deb # better than update, but only available from debian
|
||||||
|
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
|
||||||
|
$(call ssh,'sudo /usr/lib/startos/scripts/chroot-and-upgrade --create')
|
||||||
|
$(call mkdir,/media/startos/next/tmp/startos-deb)
|
||||||
|
$(call cp,results/$(BASENAME).deb,/media/startos/next/tmp/startos-deb/$(BASENAME).deb)
|
||||||
|
$(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync "apt-get install -y --reinstall /tmp/startos-deb/$(BASENAME).deb"')
|
||||||
|
|
||||||
|
update-squashfs: results/$(BASENAME).squashfs
|
||||||
|
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
|
||||||
|
$(eval SQFS_SUM := $(shell b3sum results/$(BASENAME).squashfs))
|
||||||
|
$(eval SQFS_SIZE := $(shell du -s --bytes results/$(BASENAME).squashfs | awk '{print $$1}'))
|
||||||
|
$(call ssh,'/usr/lib/startos/scripts/prune-images $(SQFS_SIZE)')
|
||||||
|
$(call ssh,'/usr/lib/startos/scripts/prune-boot')
|
||||||
|
$(call cp,results/$(BASENAME).squashfs,/media/startos/images/next.rootfs)
|
||||||
|
$(call ssh,'sudo CHECKSUM=$(SQFS_SUM) /usr/lib/startos/scripts/upgrade /media/startos/images/next.rootfs')
|
||||||
|
|
||||||
|
emulate-reflash: $(STARTOS_TARGETS)
|
||||||
|
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
|
||||||
|
$(call ssh,'sudo /usr/lib/startos/scripts/chroot-and-upgrade --create')
|
||||||
|
$(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) DESTDIR=/media/startos/next PLATFORM=$(PLATFORM)
|
||||||
|
$(call ssh,'sudo rm -f /media/startos/config/disk.guid /media/startos/config/overlay/etc/hostname')
|
||||||
|
$(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync "apt-get install -y $(shell cat ./build/lib/depends)"')
|
||||||
|
|
||||||
|
upload-ota: results/$(BASENAME).squashfs
|
||||||
|
TARGET=$(TARGET) KEY=$(KEY) ./build/upload-ota.sh
|
||||||
|
|
||||||
|
container-runtime/debian.$(ARCH).squashfs: ./container-runtime/download-base-image.sh
|
||||||
|
ARCH=$(ARCH) ./container-runtime/download-base-image.sh
|
||||||
|
|
||||||
|
container-runtime/package-lock.json: sdk/dist/package.json
|
||||||
|
npm --prefix container-runtime i
|
||||||
|
touch container-runtime/package-lock.json
|
||||||
|
|
||||||
|
container-runtime/node_modules/.package-lock.json: container-runtime/package-lock.json
|
||||||
|
npm --prefix container-runtime ci
|
||||||
|
touch container-runtime/node_modules/.package-lock.json
|
||||||
|
|
||||||
|
ts-bindings: core/bindings/index.ts
|
||||||
|
mkdir -p sdk/base/lib/osBindings
|
||||||
|
rsync -ac --delete core/bindings/ sdk/base/lib/osBindings/
|
||||||
|
|
||||||
|
core/bindings/index.ts: $(call ls-files, core) $(ENVIRONMENT_FILE)
|
||||||
|
rm -rf core/bindings
|
||||||
|
./core/build/build-ts.sh
|
||||||
|
ls core/bindings/*.ts | sed 's/core\/bindings\/\([^.]*\)\.ts/export { \1 } from ".\/\1";/g' | grep -v '"./index"' | tee core/bindings/index.ts
|
||||||
|
npm --prefix sdk exec -- prettier --config ./sdk/base/package.json -w ./core/bindings/*.ts
|
||||||
|
touch core/bindings/index.ts
|
||||||
|
|
||||||
|
sdk/dist/package.json sdk/baseDist/package.json: $(call ls-files, sdk) sdk/base/lib/osBindings/index.ts
|
||||||
|
(cd sdk && make bundle)
|
||||||
|
touch sdk/dist/package.json
|
||||||
|
touch sdk/baseDist/package.json
|
||||||
|
|
||||||
|
# TODO: make container-runtime its own makefile?
|
||||||
|
container-runtime/dist/index.js: container-runtime/node_modules/.package-lock.json $(call ls-files, container-runtime/src) container-runtime/package.json container-runtime/tsconfig.json
|
||||||
|
npm --prefix container-runtime run build
|
||||||
|
|
||||||
|
container-runtime/dist/node_modules/.package-lock.json container-runtime/dist/package.json container-runtime/dist/package-lock.json: container-runtime/package.json container-runtime/package-lock.json sdk/dist/package.json container-runtime/install-dist-deps.sh
|
||||||
|
./container-runtime/install-dist-deps.sh
|
||||||
|
touch container-runtime/dist/node_modules/.package-lock.json
|
||||||
|
|
||||||
|
container-runtime/rootfs.$(ARCH).squashfs: container-runtime/debian.$(ARCH).squashfs container-runtime/container-runtime.service container-runtime/update-image.sh container-runtime/update-image-local.sh container-runtime/deb-install.sh container-runtime/dist/index.js container-runtime/dist/node_modules/.package-lock.json core/target/$(RUST_ARCH)-unknown-linux-musl/release/start-container
|
||||||
|
ARCH=$(ARCH) ./container-runtime/update-image-local.sh
|
||||||
|
|
||||||
|
build/lib/depends build/lib/conflicts: $(ENVIRONMENT_FILE) $(PLATFORM_FILE) $(shell ls build/dpkg-deps/*)
|
||||||
|
PLATFORM=$(PLATFORM) ARCH=$(ARCH) build/dpkg-deps/generate.sh
|
||||||
|
|
||||||
|
$(FIRMWARE_ROMS): build/lib/firmware.json ./build/download-firmware.sh $(PLATFORM_FILE)
|
||||||
|
./build/download-firmware.sh $(PLATFORM)
|
||||||
|
|
||||||
|
core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox: $(CORE_SRC) $(COMPRESSED_WEB_UIS) web/patchdb-ui-seed.json $(ENVIRONMENT_FILE)
|
||||||
|
ARCH=$(ARCH) PROFILE=$(PROFILE) ./core/build/build-startbox.sh
|
||||||
|
touch core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox
|
||||||
|
|
||||||
|
core/target/$(RUST_ARCH)-unknown-linux-musl/release/start-container: $(CORE_SRC) $(ENVIRONMENT_FILE)
|
||||||
|
ARCH=$(ARCH) ./core/build/build-start-container.sh
|
||||||
|
touch core/target/$(RUST_ARCH)-unknown-linux-musl/release/start-container
|
||||||
|
|
||||||
|
web/package-lock.json: web/package.json sdk/baseDist/package.json
|
||||||
|
npm --prefix web i
|
||||||
|
touch web/package-lock.json
|
||||||
|
|
||||||
|
web/node_modules/.package-lock.json: web/package-lock.json
|
||||||
|
npm --prefix web ci
|
||||||
|
touch web/node_modules/.package-lock.json
|
||||||
|
|
||||||
|
web/.angular/.updated: patch-db/client/dist/index.js sdk/baseDist/package.json web/node_modules/.package-lock.json
|
||||||
|
rm -rf web/.angular
|
||||||
|
mkdir -p web/.angular
|
||||||
|
touch web/.angular/.updated
|
||||||
|
|
||||||
|
web/.i18n-checked: $(WEB_SHARED_SRC) $(WEB_UI_SRC) $(WEB_SETUP_WIZARD_SRC) $(WEB_START_TUNNEL_SRC)
|
||||||
|
npm --prefix web run check:i18n
|
||||||
|
touch web/.i18n-checked
|
||||||
|
|
||||||
|
web/dist/raw/ui/index.html: $(WEB_UI_SRC) $(WEB_SHARED_SRC) web/.angular/.updated web/.i18n-checked
|
||||||
|
npm --prefix web run build:ui
|
||||||
|
touch web/dist/raw/ui/index.html
|
||||||
|
|
||||||
|
web/dist/raw/setup-wizard/index.html: $(WEB_SETUP_WIZARD_SRC) $(WEB_SHARED_SRC) web/.angular/.updated web/.i18n-checked
|
||||||
|
npm --prefix web run build:setup
|
||||||
|
touch web/dist/raw/setup-wizard/index.html
|
||||||
|
|
||||||
|
web/dist/raw/start-tunnel/index.html: $(WEB_START_TUNNEL_SRC) $(WEB_SHARED_SRC) web/.angular/.updated web/.i18n-checked
|
||||||
|
npm --prefix web run build:tunnel
|
||||||
|
touch web/dist/raw/start-tunnel/index.html
|
||||||
|
|
||||||
|
web/dist/static/%/index.html: web/dist/raw/%/index.html
|
||||||
|
./web/compress-uis.sh $*
|
||||||
|
|
||||||
|
web/config.json: $(GIT_HASH_FILE) $(ENVIRONMENT_FILE) web/config-sample.json web/update-config.sh
|
||||||
|
./web/update-config.sh
|
||||||
|
|
||||||
|
patch-db/client/node_modules/.package-lock.json: patch-db/client/package.json
|
||||||
npm --prefix patch-db/client ci
|
npm --prefix patch-db/client ci
|
||||||
|
touch patch-db/client/node_modules/.package-lock.json
|
||||||
|
|
||||||
patch-db/client/dist: $(PATCH_DB_CLIENT_SRC) patch-db/client/node_modules
|
patch-db/client/dist/index.js: $(PATCH_DB_CLIENT_SRC) patch-db/client/node_modules/.package-lock.json
|
||||||
! test -d patch-db/client/dist || rm -rf patch-db/client/dist
|
rm -rf patch-db/client/dist
|
||||||
npm --prefix frontend run build:deps
|
npm --prefix patch-db/client run build
|
||||||
|
touch patch-db/client/dist/index.js
|
||||||
|
|
||||||
# used by github actions
|
# used by github actions
|
||||||
backend-$(ARCH).tar: $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(EMBASSY_BINS)
|
compiled-$(ARCH).tar: $(COMPILED_TARGETS) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE)
|
||||||
tar -cvf $@ $^
|
tar -cvf $@ $^
|
||||||
|
|
||||||
# this is a convenience step to build all frontends - it is not referenced elsewhere in this file
|
# this is a convenience step to build all web uis - it is not referenced elsewhere in this file
|
||||||
frontends: $(EMBASSY_UIS)
|
uis: $(WEB_UIS)
|
||||||
|
|
||||||
# this is a convenience step to build the UI
|
# this is a convenience step to build the UI
|
||||||
ui: frontend/dist/ui
|
ui: web/dist/raw/ui
|
||||||
|
|
||||||
# used by github actions
|
target/aarch64-unknown-linux-musl/release/pi-beep: ./build/build-cargo-dep.sh
|
||||||
backend: $(EMBASSY_BINS)
|
ARCH=aarch64 ./build/build-cargo-dep.sh pi-beep
|
||||||
|
|
||||||
cargo-deps/aarch64-unknown-linux-gnu/release/nc-broadcast:
|
target/$(RUST_ARCH)-unknown-linux-musl/release/tokio-console: ./build/build-cargo-dep.sh
|
||||||
./build-cargo-dep.sh nc-broadcast
|
ARCH=$(ARCH) ./build/build-cargo-dep.sh tokio-console
|
||||||
|
touch $@
|
||||||
|
|
||||||
|
target/$(RUST_ARCH)-unknown-linux-musl/release/startos-backup-fs: ./build/build-cargo-dep.sh
|
||||||
|
ARCH=$(ARCH) ./build/build-cargo-dep.sh --git https://github.com/Start9Labs/start-fs.git startos-backup-fs
|
||||||
|
touch $@
|
||||||
|
|
||||||
|
target/$(RUST_ARCH)-unknown-linux-musl/release/flamegraph: ./build/build-cargo-dep.sh
|
||||||
|
ARCH=$(ARCH) ./build/build-cargo-dep.sh flamegraph
|
||||||
|
touch $@
|
||||||
|
|||||||
125
README.md
@@ -1,51 +1,82 @@
|
|||||||
# embassyOS
|
<div align="center">
|
||||||
[](https://github.com/Start9Labs/embassy-os/releases)
|
<img src="web/projects/shared/assets/img/icon.png" alt="StartOS Logo" width="16%" />
|
||||||
[](https://github.com/Start9Labs/embassy-os/actions/workflows/product.yaml)
|
<h1 style="margin-top: 0;">StartOS</h1>
|
||||||
[](https://matrix.to/#/#community:matrix.start9labs.com)
|
<a href="https://github.com/Start9Labs/start-os/releases">
|
||||||
[](https://t.me/start9_labs)
|
<img alt="GitHub release (with filter)" src="https://img.shields.io/github/v/release/start9labs/start-os?logo=github">
|
||||||
[](https://docs.start9labs.com)
|
</a>
|
||||||
[](https://matrix.to/#/#community-dev:matrix.start9labs.com)
|
<a href="https://github.com/Start9Labs/start-os/actions/workflows/startos-iso.yaml">
|
||||||
[](https://start9labs.com)
|
<img src="https://github.com/Start9Labs/start-os/actions/workflows/startos-iso.yaml/badge.svg">
|
||||||
|
</a>
|
||||||
[](http://mastodon.start9labs.com)
|
<a href="https://heyapollo.com/product/startos">
|
||||||
[](https://twitter.com/start9labs)
|
<img alt="Static Badge" src="https://img.shields.io/badge/apollo-review%20%E2%AD%90%E2%AD%90%E2%AD%90%E2%AD%90%E2%AD%90%20-slateblue">
|
||||||
|
</a>
|
||||||
### _Welcome to the era of Sovereign Computing_ ###
|
<a href="https://twitter.com/start9labs">
|
||||||
|
<img alt="X (formerly Twitter) Follow" src="https://img.shields.io/twitter/follow/start9labs">
|
||||||
embassyOS is a browser-based, graphical operating system for a personal server. embassyOS facilitates the discovery, installation, network configuration, service configuration, data backup, dependency management, and health monitoring of self-hosted software services. It is the most advanced, secure, reliable, and user friendly personal server OS in the world.
|
</a>
|
||||||
|
<a href="https://matrix.to/#/#community:matrix.start9labs.com">
|
||||||
## Running embassyOS
|
<img alt="Static Badge" src="https://img.shields.io/badge/community-matrix-yellow?logo=matrix">
|
||||||
There are multiple ways to get your hands on embassyOS.
|
</a>
|
||||||
|
<a href="https://t.me/start9_labs">
|
||||||
### :moneybag: Buy an Embassy
|
<img alt="Static Badge" src="https://img.shields.io/badge/community-telegram-blue?logo=telegram">
|
||||||
This is the most convenient option. Simply [buy an Embassy](https://start9.com) from Start9 and plug it in. Depending on where you live, shipping costs and import duties will vary.
|
</a>
|
||||||
|
<a href="https://docs.start9.com">
|
||||||
### :construction_worker: Build your own Embassy
|
<img alt="Static Badge" src="https://img.shields.io/badge/docs-orange?label=%F0%9F%91%A4%20support">
|
||||||
While not as convenient as buying an Embassy, this option is easier than you might imagine, and there are 4 reasons why you might prefer it:
|
</a>
|
||||||
1. You already have a Raspberry Pi and would like to re-purpose it.
|
<a href="https://matrix.to/#/#community-dev:matrix.start9labs.com">
|
||||||
1. You want to save on shipping costs.
|
<img alt="Static Badge" src="https://img.shields.io/badge/developer-matrix-darkcyan?logo=matrix">
|
||||||
1. You prefer not to divulge your physical address.
|
</a>
|
||||||
1. You just like building things.
|
<a href="https://start9.com">
|
||||||
|
<img alt="Website" src="https://img.shields.io/website?up_message=online&down_message=offline&url=https%3A%2F%2Fstart9.com&logo=website&label=%F0%9F%8C%90%20website">
|
||||||
To pursue this option, follow this [guide](https://start9.com/latest/diy).
|
</a>
|
||||||
|
</div>
|
||||||
### :hammer_and_wrench: Build embassyOS from Source
|
<br />
|
||||||
|
<div align="center">
|
||||||
embassyOS can be built from source, for personal use, for free.
|
<h3>
|
||||||
A detailed guide for doing so can be found [here](https://github.com/Start9Labs/embassy-os/blob/master/build/README.md).
|
Welcome to the era of Sovereign Computing
|
||||||
|
</h3>
|
||||||
## :heart: Contributing
|
<p>
|
||||||
There are multiple ways to contribute: work directly on embassyOS, package a service for the marketplace, or help with documentation and guides. To learn more about contributing, see [here](https://github.com/Start9Labs/embassy-os/blob/master/CONTRIBUTING.md).
|
StartOS is an open source Linux distribution optimized for running a personal server. It facilitates the discovery, installation, network configuration, service configuration, data backup, dependency management, and health monitoring of self-hosted software services.
|
||||||
|
</p>
|
||||||
## UI Screenshots
|
</div>
|
||||||
|
<br />
|
||||||
<p align="center">
|
<p align="center">
|
||||||
<img src="assets/embassyOS.png" alt="embassyOS" width="85%">
|
<img src="assets/StartOS.png" alt="StartOS" width="85%">
|
||||||
</p>
|
</p>
|
||||||
|
<br />
|
||||||
|
|
||||||
|
## Running StartOS
|
||||||
|
> [!WARNING]
|
||||||
|
> StartOS is in beta. It lacks features. It doesn't always work perfectly. Start9 servers are not plug and play. Using them properly requires some effort and patience. Please do not use StartOS or purchase a server if you are unable or unwilling to follow instructions and learn new concepts.
|
||||||
|
|
||||||
|
### 💰 Buy a Start9 server
|
||||||
|
This is the most convenient option. Simply [buy a server](https://store.start9.com) from Start9 and plug it in.
|
||||||
|
|
||||||
|
### 👷 Build your own server
|
||||||
|
This option is easier than you might imagine, and there are 4 reasons why you might prefer it:
|
||||||
|
1. You already have hardware
|
||||||
|
1. You want to save on shipping costs
|
||||||
|
1. You prefer not to divulge your physical address
|
||||||
|
1. You just like building things
|
||||||
|
|
||||||
|
To pursue this option, follow one of our [DIY guides](https://start9.com/latest/diy).
|
||||||
|
|
||||||
|
## ❤️ Contributing
|
||||||
|
There are multiple ways to contribute: work directly on StartOS, package a service for the marketplace, or help with documentation and guides. To learn more about contributing, see [here](https://start9.com/contribute/).
|
||||||
|
|
||||||
|
To report security issues, please email our security team - security@start9.com.
|
||||||
|
|
||||||
|
## 🌎 Marketplace
|
||||||
|
There are dozens of services available for StartOS, and new ones are being added all the time. Check out the full list of available services [here](https://marketplace.start9.com/marketplace). To read more about the Marketplace ecosystem, check out this [blog post](https://blog.start9.com/start9-marketplace-strategy/)
|
||||||
|
|
||||||
|
## 🖥️ User Interface Screenshots
|
||||||
|
|
||||||
<p align="center">
|
<p align="center">
|
||||||
<img src="assets/eOS-preferences.png" alt="Embassy Preferences" width="49%">
|
<img src="assets/registry.png" alt="StartOS Marketplace" width="49%">
|
||||||
<img src="assets/eOS-ghost.png" alt="Embassy Ghost Service" width="49%">
|
<img src="assets/community.png" alt="StartOS Community Registry" width="49%">
|
||||||
</p>
|
<img src="assets/c-lightning.png" alt="StartOS NextCloud Service" width="49%">
|
||||||
<p align="center">
|
<img src="assets/btcpay.png" alt="StartOS BTCPay Service" width="49%">
|
||||||
<img src="assets/eOS-synapse-health-check.png" alt="Embassy Synapse Health Checks" width="49%">
|
<img src="assets/nextcloud.png" alt="StartOS System Settings" width="49%">
|
||||||
<img src="assets/eOS-sideload.png" alt="Embassy Sideload Service" width="49%">
|
<img src="assets/system.png" alt="StartOS System Settings" width="49%">
|
||||||
|
<img src="assets/welcome.png" alt="StartOS System Settings" width="49%">
|
||||||
|
<img src="assets/logs.png" alt="StartOS System Settings" width="49%">
|
||||||
</p>
|
</p>
|
||||||
|
|||||||
9
agents/TODO.md
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
# AI Agent TODOs
|
||||||
|
|
||||||
|
Pending tasks for AI agents. Remove items when completed.
|
||||||
|
|
||||||
|
## Unreviewed CLAUDE.md Sections
|
||||||
|
|
||||||
|
- [ ] Architecture - Web (`/web`) - @MattDHill
|
||||||
|
|
||||||
|
|
||||||
201
agents/VERSION_BUMP.md
Normal file
@@ -0,0 +1,201 @@
|
|||||||
|
# StartOS Version Bump Guide
|
||||||
|
|
||||||
|
This document explains how to bump the StartOS version across the entire codebase.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
When bumping from version `X.Y.Z-alpha.N` to `X.Y.Z-alpha.N+1`, you need to update files in multiple locations across the repository. The `// VERSION_BUMP` comment markers indicate where changes are needed.
|
||||||
|
|
||||||
|
## Files to Update
|
||||||
|
|
||||||
|
### 1. Core Rust Crate Version
|
||||||
|
|
||||||
|
**File: `core/Cargo.toml`**
|
||||||
|
|
||||||
|
Update the version string (line ~18):
|
||||||
|
|
||||||
|
```toml
|
||||||
|
version = "0.4.0-alpha.15" # VERSION_BUMP
|
||||||
|
```
|
||||||
|
|
||||||
|
**File: `core/Cargo.lock`**
|
||||||
|
|
||||||
|
This file is auto-generated. After updating `Cargo.toml`, run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd core
|
||||||
|
cargo check
|
||||||
|
```
|
||||||
|
|
||||||
|
This will update the version in `Cargo.lock` automatically.
|
||||||
|
|
||||||
|
### 2. Create New Version Migration Module
|
||||||
|
|
||||||
|
**File: `core/src/version/vX_Y_Z_alpha_N+1.rs`**
|
||||||
|
|
||||||
|
Create a new version file by copying the previous version and updating:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use exver::{PreReleaseSegment, VersionRange};
|
||||||
|
|
||||||
|
use super::v0_3_5::V0_3_0_COMPAT;
|
||||||
|
use super::{VersionT, v0_4_0_alpha_14}; // Update to previous version
|
||||||
|
use crate::prelude::*;
|
||||||
|
|
||||||
|
lazy_static::lazy_static! {
|
||||||
|
static ref V0_4_0_alpha_15: exver::Version = exver::Version::new(
|
||||||
|
[0, 4, 0],
|
||||||
|
[PreReleaseSegment::String("alpha".into()), 15.into()] // Update number
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Copy, Debug, Default)]
|
||||||
|
pub struct Version;
|
||||||
|
|
||||||
|
impl VersionT for Version {
|
||||||
|
type Previous = v0_4_0_alpha_14::Version; // Update to previous version
|
||||||
|
type PreUpRes = ();
|
||||||
|
|
||||||
|
async fn pre_up(self) -> Result<Self::PreUpRes, Error> {
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
fn semver(self) -> exver::Version {
|
||||||
|
V0_4_0_alpha_15.clone() // Update version name
|
||||||
|
}
|
||||||
|
fn compat(self) -> &'static VersionRange {
|
||||||
|
&V0_3_0_COMPAT
|
||||||
|
}
|
||||||
|
#[instrument(skip_all)]
|
||||||
|
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
|
||||||
|
// Add migration logic here if needed
|
||||||
|
Ok(Value::Null)
|
||||||
|
}
|
||||||
|
fn down(self, _db: &mut Value) -> Result<(), Error> {
|
||||||
|
// Add rollback logic here if needed
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Update Version Module Registry
|
||||||
|
|
||||||
|
**File: `core/src/version/mod.rs`**
|
||||||
|
|
||||||
|
Make changes in **5 locations**:
|
||||||
|
|
||||||
|
#### Location 1: Module Declaration (~line 57)
|
||||||
|
|
||||||
|
Add the new module after the previous version:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
mod v0_4_0_alpha_14;
|
||||||
|
mod v0_4_0_alpha_15; // Add this
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Location 2: Current Type Alias (~line 59)
|
||||||
|
|
||||||
|
Update the `Current` type and move the `// VERSION_BUMP` comment:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
pub type Current = v0_4_0_alpha_15::Version; // VERSION_BUMP
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Location 3: Version Enum (~line 175)
|
||||||
|
|
||||||
|
Remove `// VERSION_BUMP` from the previous version, add new variant, add comment:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
V0_4_0_alpha_14(Wrapper<v0_4_0_alpha_14::Version>),
|
||||||
|
V0_4_0_alpha_15(Wrapper<v0_4_0_alpha_15::Version>), // VERSION_BUMP
|
||||||
|
Other(exver::Version),
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Location 4: as_version_t() Match (~line 233)
|
||||||
|
|
||||||
|
Remove `// VERSION_BUMP`, add new match arm, add comment:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
Self::V0_4_0_alpha_14(v) => DynVersion(Box::new(v.0)),
|
||||||
|
Self::V0_4_0_alpha_15(v) => DynVersion(Box::new(v.0)), // VERSION_BUMP
|
||||||
|
Self::Other(v) => {
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Location 5: as_exver() Match (~line 284, inside #[cfg(test)])
|
||||||
|
|
||||||
|
Remove `// VERSION_BUMP`, add new match arm, add comment:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
Version::V0_4_0_alpha_14(Wrapper(x)) => x.semver(),
|
||||||
|
Version::V0_4_0_alpha_15(Wrapper(x)) => x.semver(), // VERSION_BUMP
|
||||||
|
Version::Other(x) => x.clone(),
|
||||||
|
```
|
||||||
|
|
||||||
|
### 4. SDK TypeScript Version
|
||||||
|
|
||||||
|
**File: `sdk/package/lib/StartSdk.ts`**
|
||||||
|
|
||||||
|
Update the OSVersion constant (~line 64):
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
export const OSVersion = testTypeVersion("0.4.0-alpha.15");
|
||||||
|
```
|
||||||
|
|
||||||
|
### 5. Web UI Package Version
|
||||||
|
|
||||||
|
**File: `web/package.json`**
|
||||||
|
|
||||||
|
Update the version field:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"name": "startos-ui",
|
||||||
|
"version": "0.4.0-alpha.15",
|
||||||
|
...
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**File: `web/package-lock.json`**
|
||||||
|
|
||||||
|
This file is auto-generated, but it's faster to update manually. Find all instances of "startos-ui" and update the version field.
|
||||||
|
|
||||||
|
## Verification Step
|
||||||
|
|
||||||
|
```
|
||||||
|
make
|
||||||
|
```
|
||||||
|
|
||||||
|
## VERSION_BUMP Comment Pattern
|
||||||
|
|
||||||
|
The `// VERSION_BUMP` comment serves as a marker for where to make changes next time:
|
||||||
|
|
||||||
|
- Always **remove** it from the old location
|
||||||
|
- **Add** the new version entry
|
||||||
|
- **Move** the comment to mark the new location
|
||||||
|
|
||||||
|
This pattern helps you quickly find all the places that need updating in the next version bump.
|
||||||
|
|
||||||
|
## Summary Checklist
|
||||||
|
|
||||||
|
- [ ] Update `core/Cargo.toml` version
|
||||||
|
- [ ] Create new `core/src/version/vX_Y_Z_alpha_N+1.rs` file
|
||||||
|
- [ ] Update `core/src/version/mod.rs` in 5 locations
|
||||||
|
- [ ] Run `cargo check` to update `core/Cargo.lock`
|
||||||
|
- [ ] Update `sdk/package/lib/StartSdk.ts` OSVersion
|
||||||
|
- [ ] Update `web/package.json` and `web/package-lock.json` version
|
||||||
|
- [ ] Verify all changes compile/build successfully
|
||||||
|
|
||||||
|
## Migration Logic
|
||||||
|
|
||||||
|
The `up()` and `down()` methods in the version file handle database migrations:
|
||||||
|
|
||||||
|
- **up()**: Migrates the database from the previous version to this version
|
||||||
|
- **down()**: Rolls back from this version to the previous version
|
||||||
|
- **pre_up()**: Runs before migration, useful for pre-migration checks or data gathering
|
||||||
|
|
||||||
|
If no migration is needed, return `Ok(Value::Null)` for `up()` and `Ok(())` for `down()`.
|
||||||
|
|
||||||
|
For complex migrations, you may need to:
|
||||||
|
|
||||||
|
1. Update `type PreUpRes` to pass data between `pre_up()` and `up()`
|
||||||
|
2. Implement database transformations in the `up()` method
|
||||||
|
3. Implement reverse transformations in `down()` for rollback support
|
||||||
249
agents/core-rust-patterns.md
Normal file
@@ -0,0 +1,249 @@
|
|||||||
|
# Utilities & Patterns
|
||||||
|
|
||||||
|
This document covers common utilities and patterns used throughout the StartOS codebase.
|
||||||
|
|
||||||
|
## Util Module (`core/src/util/`)
|
||||||
|
|
||||||
|
The `util` module contains reusable utilities. Key submodules:
|
||||||
|
|
||||||
|
| Module | Purpose |
|
||||||
|
|--------|---------|
|
||||||
|
| `actor/` | Actor pattern implementation for concurrent state management |
|
||||||
|
| `collections/` | Custom collection types |
|
||||||
|
| `crypto.rs` | Cryptographic utilities (encryption, hashing) |
|
||||||
|
| `future.rs` | Future/async utilities |
|
||||||
|
| `io.rs` | File I/O helpers (create_file, canonicalize, etc.) |
|
||||||
|
| `iter.rs` | Iterator extensions |
|
||||||
|
| `net.rs` | Network utilities |
|
||||||
|
| `rpc.rs` | RPC helpers |
|
||||||
|
| `rpc_client.rs` | RPC client utilities |
|
||||||
|
| `serde.rs` | Serialization helpers (Base64, display/fromstr, etc.) |
|
||||||
|
| `sync.rs` | Synchronization primitives (SyncMutex, etc.) |
|
||||||
|
|
||||||
|
## Command Invocation (`Invoke` trait)
|
||||||
|
|
||||||
|
The `Invoke` trait provides a clean way to run external commands with error handling:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use crate::util::Invoke;
|
||||||
|
|
||||||
|
// Simple invocation
|
||||||
|
tokio::process::Command::new("ls")
|
||||||
|
.arg("-la")
|
||||||
|
.invoke(ErrorKind::Filesystem)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
// With timeout
|
||||||
|
tokio::process::Command::new("slow-command")
|
||||||
|
.timeout(Some(Duration::from_secs(30)))
|
||||||
|
.invoke(ErrorKind::Timeout)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
// With input
|
||||||
|
let mut input = Cursor::new(b"input data");
|
||||||
|
tokio::process::Command::new("cat")
|
||||||
|
.input(Some(&mut input))
|
||||||
|
.invoke(ErrorKind::Filesystem)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
// Piped commands
|
||||||
|
tokio::process::Command::new("cat")
|
||||||
|
.arg("file.txt")
|
||||||
|
.pipe(&mut tokio::process::Command::new("grep").arg("pattern"))
|
||||||
|
.invoke(ErrorKind::Filesystem)
|
||||||
|
.await?;
|
||||||
|
```
|
||||||
|
|
||||||
|
## Guard Pattern
|
||||||
|
|
||||||
|
Guards ensure cleanup happens when they go out of scope.
|
||||||
|
|
||||||
|
### `GeneralGuard` / `GeneralBoxedGuard`
|
||||||
|
|
||||||
|
For arbitrary cleanup actions:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use crate::util::GeneralGuard;
|
||||||
|
|
||||||
|
let guard = GeneralGuard::new(|| {
|
||||||
|
println!("Cleanup runs on drop");
|
||||||
|
});
|
||||||
|
|
||||||
|
// Do work...
|
||||||
|
|
||||||
|
// Explicit drop with action
|
||||||
|
guard.drop();
|
||||||
|
|
||||||
|
// Or skip the action
|
||||||
|
// guard.drop_without_action();
|
||||||
|
```
|
||||||
|
|
||||||
|
### `FileLock`
|
||||||
|
|
||||||
|
File-based locking with automatic unlock:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use crate::util::FileLock;
|
||||||
|
|
||||||
|
let lock = FileLock::new("/path/to/lockfile", true).await?; // blocking=true
|
||||||
|
// Lock held until dropped or explicitly unlocked
|
||||||
|
lock.unlock().await?;
|
||||||
|
```
|
||||||
|
|
||||||
|
## Mount Guard Pattern (`core/src/disk/mount/guard.rs`)
|
||||||
|
|
||||||
|
RAII guards for filesystem mounts. Ensures filesystems are unmounted when guards are dropped.
|
||||||
|
|
||||||
|
### `MountGuard`
|
||||||
|
|
||||||
|
Basic mount guard:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use crate::disk::mount::guard::MountGuard;
|
||||||
|
use crate::disk::mount::filesystem::{MountType, ReadOnly};
|
||||||
|
|
||||||
|
let guard = MountGuard::mount(&filesystem, "/mnt/target", ReadOnly).await?;
|
||||||
|
|
||||||
|
// Use the mounted filesystem at guard.path()
|
||||||
|
do_something(guard.path()).await?;
|
||||||
|
|
||||||
|
// Explicit unmount (or auto-unmounts on drop)
|
||||||
|
guard.unmount(false).await?; // false = don't delete mountpoint
|
||||||
|
```
|
||||||
|
|
||||||
|
### `TmpMountGuard`
|
||||||
|
|
||||||
|
Reference-counted temporary mount (mounts to `/media/startos/tmp/`):
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use crate::disk::mount::guard::TmpMountGuard;
|
||||||
|
use crate::disk::mount::filesystem::ReadOnly;
|
||||||
|
|
||||||
|
// Multiple clones share the same mount
|
||||||
|
let guard1 = TmpMountGuard::mount(&filesystem, ReadOnly).await?;
|
||||||
|
let guard2 = guard1.clone();
|
||||||
|
|
||||||
|
// Mount stays alive while any guard exists
|
||||||
|
// Auto-unmounts when last guard is dropped
|
||||||
|
```
|
||||||
|
|
||||||
|
### `GenericMountGuard` trait
|
||||||
|
|
||||||
|
All mount guards implement this trait:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
pub trait GenericMountGuard: std::fmt::Debug + Send + Sync + 'static {
|
||||||
|
fn path(&self) -> &Path;
|
||||||
|
fn unmount(self) -> impl Future<Output = Result<(), Error>> + Send;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### `SubPath`
|
||||||
|
|
||||||
|
Wraps a mount guard to point to a subdirectory:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use crate::disk::mount::guard::SubPath;
|
||||||
|
|
||||||
|
let mount = TmpMountGuard::mount(&filesystem, ReadOnly).await?;
|
||||||
|
let subdir = SubPath::new(mount, "data/subdir");
|
||||||
|
|
||||||
|
// subdir.path() returns the full path including subdirectory
|
||||||
|
```
|
||||||
|
|
||||||
|
## FileSystem Implementations (`core/src/disk/mount/filesystem/`)
|
||||||
|
|
||||||
|
Various filesystem types that can be mounted:
|
||||||
|
|
||||||
|
| Type | Description |
|
||||||
|
|------|-------------|
|
||||||
|
| `bind.rs` | Bind mounts |
|
||||||
|
| `block_dev.rs` | Block device mounts |
|
||||||
|
| `cifs.rs` | CIFS/SMB network shares |
|
||||||
|
| `ecryptfs.rs` | Encrypted filesystem |
|
||||||
|
| `efivarfs.rs` | EFI variables |
|
||||||
|
| `httpdirfs.rs` | HTTP directory as filesystem |
|
||||||
|
| `idmapped.rs` | ID-mapped mounts |
|
||||||
|
| `label.rs` | Mount by label |
|
||||||
|
| `loop_dev.rs` | Loop device mounts |
|
||||||
|
| `overlayfs.rs` | Overlay filesystem |
|
||||||
|
|
||||||
|
## Other Useful Utilities
|
||||||
|
|
||||||
|
### `Apply` / `ApplyRef` traits
|
||||||
|
|
||||||
|
Fluent method chaining:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use crate::util::Apply;
|
||||||
|
|
||||||
|
let result = some_value
|
||||||
|
.apply(|v| transform(v))
|
||||||
|
.apply(|v| another_transform(v));
|
||||||
|
```
|
||||||
|
|
||||||
|
### `Container<T>`
|
||||||
|
|
||||||
|
Async-safe optional container:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use crate::util::Container;
|
||||||
|
|
||||||
|
let container = Container::new(None);
|
||||||
|
container.set(value).await;
|
||||||
|
let taken = container.take().await;
|
||||||
|
```
|
||||||
|
|
||||||
|
### `HashWriter<H, W>`
|
||||||
|
|
||||||
|
Write data while computing hash:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use crate::util::HashWriter;
|
||||||
|
use sha2::Sha256;
|
||||||
|
|
||||||
|
let writer = HashWriter::new(Sha256::new(), file);
|
||||||
|
// Write data...
|
||||||
|
let (hasher, file) = writer.finish();
|
||||||
|
let hash = hasher.finalize();
|
||||||
|
```
|
||||||
|
|
||||||
|
### `Never` type
|
||||||
|
|
||||||
|
Uninhabited type for impossible cases:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use crate::util::Never;
|
||||||
|
|
||||||
|
fn impossible() -> Never {
|
||||||
|
// This function can never return
|
||||||
|
}
|
||||||
|
|
||||||
|
let never: Never = impossible();
|
||||||
|
never.absurd::<String>() // Can convert to any type
|
||||||
|
```
|
||||||
|
|
||||||
|
### `MaybeOwned<'a, T>`
|
||||||
|
|
||||||
|
Either borrowed or owned data:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use crate::util::MaybeOwned;
|
||||||
|
|
||||||
|
fn accept_either(data: MaybeOwned<'_, String>) {
|
||||||
|
// Use &*data to access the value
|
||||||
|
}
|
||||||
|
|
||||||
|
accept_either(MaybeOwned::from(&existing_string));
|
||||||
|
accept_either(MaybeOwned::from(owned_string));
|
||||||
|
```
|
||||||
|
|
||||||
|
### `new_guid()`
|
||||||
|
|
||||||
|
Generate a random GUID:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use crate::util::new_guid;
|
||||||
|
|
||||||
|
let guid = new_guid(); // Returns InternedString
|
||||||
|
```
|
||||||
100
agents/i18n-patterns.md
Normal file
@@ -0,0 +1,100 @@
|
|||||||
|
# i18n Patterns in `core/`
|
||||||
|
|
||||||
|
## Library & Setup
|
||||||
|
|
||||||
|
**Crate:** [`rust-i18n`](https://crates.io/crates/rust-i18n) v3.1.5 (`core/Cargo.toml`)
|
||||||
|
|
||||||
|
**Initialization** (`core/src/lib.rs:3`):
|
||||||
|
```rust
|
||||||
|
rust_i18n::i18n!("locales", fallback = ["en_US"]);
|
||||||
|
```
|
||||||
|
This macro scans `core/locales/` at compile time and embeds all translations as constants.
|
||||||
|
|
||||||
|
**Prelude re-export** (`core/src/prelude.rs:4`):
|
||||||
|
```rust
|
||||||
|
pub use rust_i18n::t;
|
||||||
|
```
|
||||||
|
Most modules import `t!` via the prelude.
|
||||||
|
|
||||||
|
## Translation File
|
||||||
|
|
||||||
|
**Location:** `core/locales/i18n.yaml`
|
||||||
|
**Format:** YAML v2 (~755 keys)
|
||||||
|
|
||||||
|
**Supported languages:** `en_US`, `de_DE`, `es_ES`, `fr_FR`, `pl_PL`
|
||||||
|
|
||||||
|
**Entry structure:**
|
||||||
|
```yaml
|
||||||
|
namespace.sub.key-name:
|
||||||
|
en_US: "English text with %{param}"
|
||||||
|
de_DE: "German text with %{param}"
|
||||||
|
# ...
|
||||||
|
```
|
||||||
|
|
||||||
|
## Using `t!()`
|
||||||
|
|
||||||
|
```rust
|
||||||
|
// Simple key
|
||||||
|
t!("error.unknown")
|
||||||
|
|
||||||
|
// With parameter interpolation (%{name} in YAML)
|
||||||
|
t!("bins.deprecated.renamed", old = old_name, new = new_name)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Key Naming Conventions
|
||||||
|
|
||||||
|
Keys use **dot-separated hierarchical namespaces** with **kebab-case** for multi-word segments:
|
||||||
|
|
||||||
|
```
|
||||||
|
<module>.<submodule>.<descriptive-name>
|
||||||
|
```
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
- `error.incorrect-password` — error kind label
|
||||||
|
- `bins.start-init.updating-firmware` — startup phase message
|
||||||
|
- `backup.bulk.complete-title` — backup notification title
|
||||||
|
- `help.arg.acme-contact` — CLI help text for an argument
|
||||||
|
- `context.diagnostic.starting-diagnostic-ui` — diagnostic context status
|
||||||
|
|
||||||
|
### Top-Level Namespaces
|
||||||
|
|
||||||
|
| Namespace | Purpose |
|
||||||
|
|-----------|---------|
|
||||||
|
| `error.*` | `ErrorKind` display strings (see `src/error.rs`) |
|
||||||
|
| `bins.*` | CLI binary messages (deprecated, start-init, startd, etc.) |
|
||||||
|
| `init.*` | Initialization phase labels |
|
||||||
|
| `setup.*` | First-run setup messages |
|
||||||
|
| `context.*` | Context startup messages (diagnostic, setup, CLI) |
|
||||||
|
| `service.*` | Service lifecycle messages |
|
||||||
|
| `backup.*` | Backup/restore operation messages |
|
||||||
|
| `registry.*` | Package registry messages |
|
||||||
|
| `net.*` | Network-related messages |
|
||||||
|
| `middleware.*` | Request middleware messages (auth, etc.) |
|
||||||
|
| `disk.*` | Disk operation messages |
|
||||||
|
| `lxc.*` | Container management messages |
|
||||||
|
| `system.*` | System monitoring/metrics messages |
|
||||||
|
| `notifications.*` | User-facing notification messages |
|
||||||
|
| `update.*` | OS update messages |
|
||||||
|
| `util.*` | Utility messages (TUI, RPC) |
|
||||||
|
| `ssh.*` | SSH operation messages |
|
||||||
|
| `shutdown.*` | Shutdown-related messages |
|
||||||
|
| `logs.*` | Log-related messages |
|
||||||
|
| `auth.*` | Authentication messages |
|
||||||
|
| `help.*` | CLI help text (`help.arg.<arg-name>`) |
|
||||||
|
| `about.*` | CLI command descriptions |
|
||||||
|
|
||||||
|
## Locale Selection
|
||||||
|
|
||||||
|
`core/src/bins/mod.rs:15-36` — `set_locale_from_env()`:
|
||||||
|
|
||||||
|
1. Reads `LANG` environment variable
|
||||||
|
2. Strips `.UTF-8` suffix
|
||||||
|
3. Exact-matches against available locales, falls back to language-prefix match (e.g. `en_GB` matches `en_US`)
|
||||||
|
|
||||||
|
## Adding New Keys
|
||||||
|
|
||||||
|
1. Add the key to `core/locales/i18n.yaml` with all 5 language translations
|
||||||
|
2. Use the `t!("your.key.name")` macro in Rust code
|
||||||
|
3. Follow existing namespace conventions — match the module path where the key is used
|
||||||
|
4. Use kebab-case for multi-word segments
|
||||||
|
5. Translations are validated at compile time
|
||||||
226
agents/rpc-toolkit.md
Normal file
@@ -0,0 +1,226 @@
|
|||||||
|
# rpc-toolkit
|
||||||
|
|
||||||
|
StartOS uses [rpc-toolkit](https://github.com/Start9Labs/rpc-toolkit) for its JSON-RPC API. This document covers the patterns used in this codebase.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The API is JSON-RPC (not REST). All endpoints are RPC methods organized in a hierarchical command structure.
|
||||||
|
|
||||||
|
## Handler Functions
|
||||||
|
|
||||||
|
There are four types of handler functions, chosen based on the function's characteristics:
|
||||||
|
|
||||||
|
### `from_fn_async` - Async handlers
|
||||||
|
For standard async functions. Most handlers use this.
|
||||||
|
|
||||||
|
```rust
|
||||||
|
pub async fn my_handler(ctx: RpcContext, params: MyParams) -> Result<MyResponse, Error> {
|
||||||
|
// Can use .await
|
||||||
|
}
|
||||||
|
|
||||||
|
from_fn_async(my_handler)
|
||||||
|
```
|
||||||
|
|
||||||
|
### `from_fn_async_local` - Non-thread-safe async handlers
|
||||||
|
For async functions that are not `Send` (cannot be safely moved between threads). Use when working with non-thread-safe types.
|
||||||
|
|
||||||
|
```rust
|
||||||
|
pub async fn cli_download(ctx: CliContext, params: Params) -> Result<(), Error> {
|
||||||
|
// Non-Send async operations
|
||||||
|
}
|
||||||
|
|
||||||
|
from_fn_async_local(cli_download)
|
||||||
|
```
|
||||||
|
|
||||||
|
### `from_fn_blocking` - Sync blocking handlers
|
||||||
|
For synchronous functions that perform blocking I/O or long computations.
|
||||||
|
|
||||||
|
```rust
|
||||||
|
pub fn query_dns(ctx: RpcContext, params: DnsParams) -> Result<DnsResponse, Error> {
|
||||||
|
// Blocking operations (file I/O, DNS lookup, etc.)
|
||||||
|
}
|
||||||
|
|
||||||
|
from_fn_blocking(query_dns)
|
||||||
|
```
|
||||||
|
|
||||||
|
### `from_fn` - Sync non-blocking handlers
|
||||||
|
For pure functions or quick synchronous operations with no I/O.
|
||||||
|
|
||||||
|
```rust
|
||||||
|
pub fn echo(ctx: RpcContext, params: EchoParams) -> Result<String, Error> {
|
||||||
|
Ok(params.message)
|
||||||
|
}
|
||||||
|
|
||||||
|
from_fn(echo)
|
||||||
|
```
|
||||||
|
|
||||||
|
## ParentHandler
|
||||||
|
|
||||||
|
Groups related RPC methods into a hierarchy:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use rpc_toolkit::{Context, HandlerExt, ParentHandler, from_fn_async};
|
||||||
|
|
||||||
|
pub fn my_api<C: Context>() -> ParentHandler<C> {
|
||||||
|
ParentHandler::new()
|
||||||
|
.subcommand("list", from_fn_async(list_handler).with_call_remote::<CliContext>())
|
||||||
|
.subcommand("create", from_fn_async(create_handler).with_call_remote::<CliContext>())
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Handler Extensions
|
||||||
|
|
||||||
|
Chain methods to configure handler behavior.
|
||||||
|
|
||||||
|
**Ordering rules:**
|
||||||
|
1. `with_about()` must come AFTER other CLI modifiers (`no_display()`, `with_custom_display_fn()`, etc.)
|
||||||
|
2. `with_call_remote()` must be the LAST adapter in the chain
|
||||||
|
|
||||||
|
| Method | Purpose |
|
||||||
|
|--------|---------|
|
||||||
|
| `.with_metadata("key", Value)` | Attach metadata for middleware |
|
||||||
|
| `.no_cli()` | RPC-only, not available via CLI |
|
||||||
|
| `.no_display()` | No CLI output |
|
||||||
|
| `.with_display_serializable()` | Default JSON/YAML output for CLI |
|
||||||
|
| `.with_custom_display_fn(\|_, res\| ...)` | Custom CLI output formatting |
|
||||||
|
| `.with_about("about.description")` | Add help text (i18n key) - **after CLI modifiers** |
|
||||||
|
| `.with_call_remote::<CliContext>()` | Enable CLI to call remotely - **must be last** |
|
||||||
|
|
||||||
|
### Correct ordering example:
|
||||||
|
```rust
|
||||||
|
from_fn_async(my_handler)
|
||||||
|
.with_metadata("sync_db", Value::Bool(true)) // metadata early
|
||||||
|
.no_display() // CLI modifier
|
||||||
|
.with_about("about.my-handler") // after CLI modifiers
|
||||||
|
.with_call_remote::<CliContext>() // always last
|
||||||
|
```
|
||||||
|
|
||||||
|
## Metadata by Middleware
|
||||||
|
|
||||||
|
Metadata tags are processed by different middleware. Group them logically:
|
||||||
|
|
||||||
|
### Auth Middleware (`middleware/auth/mod.rs`)
|
||||||
|
|
||||||
|
| Metadata | Default | Description |
|
||||||
|
|----------|---------|-------------|
|
||||||
|
| `authenticated` | `true` | Whether endpoint requires authentication. Set to `false` for public endpoints. |
|
||||||
|
|
||||||
|
### Session Auth Middleware (`middleware/auth/session.rs`)
|
||||||
|
|
||||||
|
| Metadata | Default | Description |
|
||||||
|
|----------|---------|-------------|
|
||||||
|
| `login` | `false` | Special handling for login endpoints (rate limiting, cookie setting) |
|
||||||
|
| `get_session` | `false` | Inject session ID into params as `__Auth_session` |
|
||||||
|
|
||||||
|
### Signature Auth Middleware (`middleware/auth/signature.rs`)
|
||||||
|
|
||||||
|
| Metadata | Default | Description |
|
||||||
|
|----------|---------|-------------|
|
||||||
|
| `get_signer` | `false` | Inject signer public key into params as `__Auth_signer` |
|
||||||
|
|
||||||
|
### Registry Auth (extends Signature Auth)
|
||||||
|
|
||||||
|
| Metadata | Default | Description |
|
||||||
|
|----------|---------|-------------|
|
||||||
|
| `admin` | `false` | Require admin privileges (signer must be in admin list) |
|
||||||
|
| `get_device_info` | `false` | Inject device info header for hardware filtering |
|
||||||
|
|
||||||
|
### Database Middleware (`middleware/db.rs`)
|
||||||
|
|
||||||
|
| Metadata | Default | Description |
|
||||||
|
|----------|---------|-------------|
|
||||||
|
| `sync_db` | `false` | Sync database after mutation, add `X-Patch-Sequence` header |
|
||||||
|
|
||||||
|
## Context Types
|
||||||
|
|
||||||
|
Different contexts for different execution environments:
|
||||||
|
|
||||||
|
- `RpcContext` - Web/RPC requests with full service access
|
||||||
|
- `CliContext` - CLI operations, calls remote RPC
|
||||||
|
- `InitContext` - During system initialization
|
||||||
|
- `DiagnosticContext` - Diagnostic/recovery mode
|
||||||
|
- `RegistryContext` - Registry daemon context
|
||||||
|
- `EffectContext` - Service effects context (container-to-host calls)
|
||||||
|
|
||||||
|
## Parameter Structs
|
||||||
|
|
||||||
|
Parameters use derive macros for JSON-RPC, CLI parsing, and TypeScript generation:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
#[derive(Deserialize, Serialize, Parser, TS)]
|
||||||
|
#[serde(rename_all = "camelCase")] // JSON-RPC uses camelCase
|
||||||
|
#[command(rename_all = "kebab-case")] // CLI uses kebab-case
|
||||||
|
#[ts(export)] // Generate TypeScript types
|
||||||
|
pub struct MyParams {
|
||||||
|
pub package_id: PackageId,
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Middleware Injection
|
||||||
|
|
||||||
|
Auth middleware can inject values into params using special field names:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
#[derive(Deserialize, Serialize, Parser, TS)]
|
||||||
|
pub struct MyParams {
|
||||||
|
#[ts(skip)]
|
||||||
|
#[serde(rename = "__Auth_session")] // Injected by session auth
|
||||||
|
session: InternedString,
|
||||||
|
|
||||||
|
#[ts(skip)]
|
||||||
|
#[serde(rename = "__Auth_signer")] // Injected by signature auth
|
||||||
|
signer: AnyVerifyingKey,
|
||||||
|
|
||||||
|
#[ts(skip)]
|
||||||
|
#[serde(rename = "__Auth_userAgent")] // Injected during login
|
||||||
|
user_agent: Option<String>,
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Common Patterns
|
||||||
|
|
||||||
|
### Adding a New RPC Endpoint
|
||||||
|
|
||||||
|
1. Define params struct with `Deserialize, Serialize, Parser, TS`
|
||||||
|
2. Choose handler type based on sync/async and thread-safety
|
||||||
|
3. Write handler function taking `(Context, Params) -> Result<Response, Error>`
|
||||||
|
4. Add to parent handler with appropriate extensions (display modifiers before `with_about`)
|
||||||
|
5. TypeScript types auto-generated via `make ts-bindings`
|
||||||
|
|
||||||
|
### Public (Unauthenticated) Endpoint
|
||||||
|
|
||||||
|
```rust
|
||||||
|
from_fn_async(get_info)
|
||||||
|
.with_metadata("authenticated", Value::Bool(false))
|
||||||
|
.with_display_serializable()
|
||||||
|
.with_about("about.get-info")
|
||||||
|
.with_call_remote::<CliContext>() // last
|
||||||
|
```
|
||||||
|
|
||||||
|
### Mutating Endpoint with DB Sync
|
||||||
|
|
||||||
|
```rust
|
||||||
|
from_fn_async(update_config)
|
||||||
|
.with_metadata("sync_db", Value::Bool(true))
|
||||||
|
.no_display()
|
||||||
|
.with_about("about.update-config")
|
||||||
|
.with_call_remote::<CliContext>() // last
|
||||||
|
```
|
||||||
|
|
||||||
|
### Session-Aware Endpoint
|
||||||
|
|
||||||
|
```rust
|
||||||
|
from_fn_async(logout)
|
||||||
|
.with_metadata("get_session", Value::Bool(true))
|
||||||
|
.no_display()
|
||||||
|
.with_about("about.logout")
|
||||||
|
.with_call_remote::<CliContext>() // last
|
||||||
|
```
|
||||||
|
|
||||||
|
## File Locations
|
||||||
|
|
||||||
|
- Handler definitions: Throughout `core/src/` modules
|
||||||
|
- Main API tree: `core/src/lib.rs` (`main_api()`, `server()`, `package()`)
|
||||||
|
- Auth middleware: `core/src/middleware/auth/`
|
||||||
|
- DB middleware: `core/src/middleware/db.rs`
|
||||||
|
- Context types: `core/src/context/`
|
||||||
122
agents/s9pk-structure.md
Normal file
@@ -0,0 +1,122 @@
|
|||||||
|
# S9PK Package Format
|
||||||
|
|
||||||
|
S9PK is the package format for StartOS services. Version 2 uses a merkle archive structure for efficient downloading and cryptographic verification.
|
||||||
|
|
||||||
|
## File Format
|
||||||
|
|
||||||
|
S9PK files begin with a 3-byte header: `0x3b 0x3b 0x02` (magic bytes + version 2).
|
||||||
|
|
||||||
|
The archive is cryptographically signed using Ed25519 with prehashed content (SHA-512 over blake3 merkle root hash).
|
||||||
|
|
||||||
|
## Archive Structure
|
||||||
|
|
||||||
|
```
|
||||||
|
/
|
||||||
|
├── manifest.json # Package metadata (required)
|
||||||
|
├── icon.<ext> # Package icon - any image/* format (required)
|
||||||
|
├── LICENSE.md # License text (required)
|
||||||
|
├── dependencies/ # Dependency metadata (optional)
|
||||||
|
│ └── <package-id>/
|
||||||
|
│ ├── metadata.json # DependencyMetadata
|
||||||
|
│ └── icon.<ext> # Dependency icon
|
||||||
|
├── javascript.squashfs # Package JavaScript code (required)
|
||||||
|
├── assets.squashfs # Static assets (optional, legacy: assets/ directory)
|
||||||
|
└── images/ # Container images by architecture
|
||||||
|
└── <arch>/ # e.g., x86_64, aarch64, riscv64
|
||||||
|
├── <image-id>.squashfs # Container filesystem
|
||||||
|
├── <image-id>.json # Image metadata
|
||||||
|
└── <image-id>.env # Environment variables
|
||||||
|
```
|
||||||
|
|
||||||
|
## Components
|
||||||
|
|
||||||
|
### manifest.json
|
||||||
|
|
||||||
|
The package manifest contains all metadata:
|
||||||
|
|
||||||
|
| Field | Type | Description |
|
||||||
|
|-------|------|-------------|
|
||||||
|
| `id` | string | Package identifier (e.g., `bitcoind`) |
|
||||||
|
| `title` | string | Display name |
|
||||||
|
| `version` | string | Extended version string |
|
||||||
|
| `satisfies` | string[] | Version ranges this version satisfies |
|
||||||
|
| `releaseNotes` | string/object | Release notes (localized) |
|
||||||
|
| `canMigrateTo` | string | Version range for forward migration |
|
||||||
|
| `canMigrateFrom` | string | Version range for backward migration |
|
||||||
|
| `license` | string | License type |
|
||||||
|
| `wrapperRepo` | string | StartOS wrapper repository URL |
|
||||||
|
| `upstreamRepo` | string | Upstream project URL |
|
||||||
|
| `supportSite` | string | Support site URL |
|
||||||
|
| `marketingSite` | string | Marketing site URL |
|
||||||
|
| `donationUrl` | string? | Optional donation URL |
|
||||||
|
| `docsUrl` | string? | Optional documentation URL |
|
||||||
|
| `description` | object | Short and long descriptions (localized) |
|
||||||
|
| `images` | object | Image configurations by image ID |
|
||||||
|
| `volumes` | string[] | Volume IDs for persistent data |
|
||||||
|
| `alerts` | object | User alerts for lifecycle events |
|
||||||
|
| `dependencies` | object | Package dependencies |
|
||||||
|
| `hardwareRequirements` | object | Hardware requirements (arch, RAM, devices) |
|
||||||
|
| `hardwareAcceleration` | boolean | Whether package uses hardware acceleration |
|
||||||
|
| `gitHash` | string? | Git commit hash |
|
||||||
|
| `osVersion` | string | Minimum StartOS version |
|
||||||
|
| `sdkVersion` | string? | SDK version used to build |
|
||||||
|
|
||||||
|
### javascript.squashfs
|
||||||
|
|
||||||
|
Contains the package JavaScript that implements the `ABI` interface from `@start9labs/start-sdk-base`. This code runs in the container runtime and manages the package lifecycle.
|
||||||
|
|
||||||
|
The squashfs is mounted at `/usr/lib/startos/package/` and the runtime loads `index.js`.
|
||||||
|
|
||||||
|
### images/
|
||||||
|
|
||||||
|
Container images organized by architecture:
|
||||||
|
|
||||||
|
- **`<image-id>.squashfs`** - Container root filesystem
|
||||||
|
- **`<image-id>.json`** - Image metadata (entrypoint, user, workdir, etc.)
|
||||||
|
- **`<image-id>.env`** - Environment variables for the container
|
||||||
|
|
||||||
|
Images are built from Docker/Podman and converted to squashfs. The `ImageConfig` in manifest specifies:
|
||||||
|
- `arch` - Supported architectures
|
||||||
|
- `emulateMissingAs` - Fallback architecture for emulation
|
||||||
|
- `nvidiaContainer` - Whether to enable NVIDIA container support
|
||||||
|
|
||||||
|
### assets.squashfs
|
||||||
|
|
||||||
|
Static assets accessible to the package, mounted read-only at `/media/startos/assets/` in the container.
|
||||||
|
|
||||||
|
### dependencies/
|
||||||
|
|
||||||
|
Metadata for dependencies displayed in the UI:
|
||||||
|
- `metadata.json` - Contains just the title for now
|
||||||
|
- `icon.<ext>` - Icon for the dependency
|
||||||
|
|
||||||
|
## Merkle Archive
|
||||||
|
|
||||||
|
The S9PK uses a merkle tree structure where each file and directory has a blake3 hash. This enables:
|
||||||
|
|
||||||
|
1. **Partial downloads** - Download and verify individual files
|
||||||
|
2. **Integrity verification** - Verify any subset of the archive
|
||||||
|
3. **Efficient updates** - Only download changed portions
|
||||||
|
4. **DOS protection** - Size limits enforced before downloading content
|
||||||
|
|
||||||
|
Files are sorted by priority for streaming (manifest first, then icon, license, dependencies, javascript, assets, images).
|
||||||
|
|
||||||
|
## Building S9PK
|
||||||
|
|
||||||
|
Use `start-cli s9pk pack` to build packages:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
start-cli s9pk pack <manifest-path> -o <output.s9pk>
|
||||||
|
```
|
||||||
|
|
||||||
|
Images can be sourced from:
|
||||||
|
- Docker/Podman build (`--docker-build`)
|
||||||
|
- Existing Docker tag (`--docker-tag`)
|
||||||
|
- Pre-built squashfs files
|
||||||
|
|
||||||
|
## Related Code
|
||||||
|
|
||||||
|
- `core/src/s9pk/v2/mod.rs` - S9pk struct and serialization
|
||||||
|
- `core/src/s9pk/v2/manifest.rs` - Manifest types
|
||||||
|
- `core/src/s9pk/v2/pack.rs` - Packing logic
|
||||||
|
- `core/src/s9pk/merkle_archive/` - Merkle archive implementation
|
||||||
BIN
assets/StartOS.png
Normal file
|
After Width: | Height: | Size: 2.1 MiB |
BIN
assets/btcpay.png
Normal file
|
After Width: | Height: | Size: 396 KiB |
BIN
assets/c-lightning.png
Normal file
|
After Width: | Height: | Size: 402 KiB |
BIN
assets/community.png
Normal file
|
After Width: | Height: | Size: 591 KiB |
BIN
assets/create-vm/step-1.png
Normal file
|
After Width: | Height: | Size: 27 KiB |
BIN
assets/create-vm/step-10.png
Normal file
|
After Width: | Height: | Size: 44 KiB |
BIN
assets/create-vm/step-11.png
Normal file
|
After Width: | Height: | Size: 53 KiB |
BIN
assets/create-vm/step-12.png
Normal file
|
After Width: | Height: | Size: 48 KiB |
BIN
assets/create-vm/step-2.png
Normal file
|
After Width: | Height: | Size: 46 KiB |
BIN
assets/create-vm/step-3.png
Normal file
|
After Width: | Height: | Size: 46 KiB |
BIN
assets/create-vm/step-4.png
Normal file
|
After Width: | Height: | Size: 50 KiB |
BIN
assets/create-vm/step-5.png
Normal file
|
After Width: | Height: | Size: 64 KiB |
BIN
assets/create-vm/step-6.png
Normal file
|
After Width: | Height: | Size: 52 KiB |
BIN
assets/create-vm/step-7.png
Normal file
|
After Width: | Height: | Size: 64 KiB |
BIN
assets/create-vm/step-8.png
Normal file
|
After Width: | Height: | Size: 53 KiB |
BIN
assets/create-vm/step-9.png
Normal file
|
After Width: | Height: | Size: 44 KiB |
|
Before Width: | Height: | Size: 281 KiB |
|
Before Width: | Height: | Size: 266 KiB |
|
Before Width: | Height: | Size: 154 KiB |
|
Before Width: | Height: | Size: 213 KiB |
|
Before Width: | Height: | Size: 191 KiB |
BIN
assets/logs.png
Normal file
|
After Width: | Height: | Size: 1.6 MiB |
BIN
assets/nextcloud.png
Normal file
|
After Width: | Height: | Size: 319 KiB |
BIN
assets/registry.png
Normal file
|
After Width: | Height: | Size: 521 KiB |
BIN
assets/system.png
Normal file
|
After Width: | Height: | Size: 331 KiB |
BIN
assets/welcome.png
Normal file
|
After Width: | Height: | Size: 402 KiB |
10
backend/.gitignore
vendored
@@ -1,10 +0,0 @@
|
|||||||
/target
|
|
||||||
**/*.rs.bk
|
|
||||||
.DS_Store
|
|
||||||
.vscode
|
|
||||||
secrets.db
|
|
||||||
*.s9pk
|
|
||||||
*.sqlite3
|
|
||||||
.env
|
|
||||||
.editorconfig
|
|
||||||
proptest-regressions/*
|
|
||||||
5403
backend/Cargo.lock
generated
@@ -1,158 +0,0 @@
|
|||||||
[package]
|
|
||||||
authors = ["Aiden McClelland <me@drbonez.dev>"]
|
|
||||||
description = "The core of the Start9 Embassy Operating System"
|
|
||||||
documentation = "https://docs.rs/embassy-os"
|
|
||||||
edition = "2018"
|
|
||||||
keywords = [
|
|
||||||
"self-hosted",
|
|
||||||
"raspberry-pi",
|
|
||||||
"privacy",
|
|
||||||
"bitcoin",
|
|
||||||
"full-node",
|
|
||||||
"lightning",
|
|
||||||
]
|
|
||||||
name = "embassy-os"
|
|
||||||
readme = "README.md"
|
|
||||||
repository = "https://github.com/Start9Labs/embassy-os"
|
|
||||||
version = "0.3.2"
|
|
||||||
|
|
||||||
[lib]
|
|
||||||
name = "embassy"
|
|
||||||
path = "src/lib.rs"
|
|
||||||
|
|
||||||
[[bin]]
|
|
||||||
name = "embassyd"
|
|
||||||
path = "src/bin/embassyd.rs"
|
|
||||||
|
|
||||||
[[bin]]
|
|
||||||
name = "embassy-init"
|
|
||||||
path = "src/bin/embassy-init.rs"
|
|
||||||
|
|
||||||
[[bin]]
|
|
||||||
name = "embassy-sdk"
|
|
||||||
path = "src/bin/embassy-sdk.rs"
|
|
||||||
|
|
||||||
[[bin]]
|
|
||||||
name = "embassy-cli"
|
|
||||||
path = "src/bin/embassy-cli.rs"
|
|
||||||
|
|
||||||
[[bin]]
|
|
||||||
name = "avahi-alias"
|
|
||||||
path = "src/bin/avahi-alias.rs"
|
|
||||||
|
|
||||||
[features]
|
|
||||||
avahi = ["avahi-sys"]
|
|
||||||
default = ["avahi", "sound", "metal", "js_engine"]
|
|
||||||
dev = []
|
|
||||||
metal = []
|
|
||||||
sound = []
|
|
||||||
unstable = ["patch-db/unstable"]
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
aes = { version = "0.7.5", features = ["ctr"] }
|
|
||||||
async-stream = "0.3.3"
|
|
||||||
async-trait = "0.1.56"
|
|
||||||
avahi-sys = { git = "https://github.com/Start9Labs/avahi-sys", version = "0.10.0", branch = "feature/dynamic-linking", features = [
|
|
||||||
"dynamic",
|
|
||||||
], optional = true }
|
|
||||||
base32 = "0.4.0"
|
|
||||||
base64 = "0.13.0"
|
|
||||||
base64ct = "1.5.1"
|
|
||||||
basic-cookies = "0.1.4"
|
|
||||||
bollard = "0.13.0"
|
|
||||||
chrono = { version = "0.4.19", features = ["serde"] }
|
|
||||||
clap = "3.2.8"
|
|
||||||
color-eyre = "0.6.1"
|
|
||||||
cookie_store = "0.16.1"
|
|
||||||
current_platform = "0.2.0"
|
|
||||||
digest = "0.10.3"
|
|
||||||
digest-old = { package = "digest", version = "0.9.0" }
|
|
||||||
divrem = "1.0.0"
|
|
||||||
ed25519 = { version = "1.5.2", features = ["pkcs8", "pem", "alloc"] }
|
|
||||||
ed25519-dalek = { version = "1.0.1", features = ["serde"] }
|
|
||||||
emver = { version = "0.1.6", features = ["serde"] }
|
|
||||||
fd-lock-rs = "0.1.4"
|
|
||||||
futures = "0.3.21"
|
|
||||||
git-version = "0.3.5"
|
|
||||||
helpers = { path = "../libs/helpers" }
|
|
||||||
hex = "0.4.3"
|
|
||||||
hmac = "0.12.1"
|
|
||||||
http = "0.2.8"
|
|
||||||
hyper = "0.14.20"
|
|
||||||
hyper-ws-listener = "0.2.0"
|
|
||||||
imbl = "2.0.0"
|
|
||||||
indexmap = { version = "1.9.1", features = ["serde"] }
|
|
||||||
isocountry = "0.3.2"
|
|
||||||
itertools = "0.10.3"
|
|
||||||
josekit = "0.8.1"
|
|
||||||
js_engine = { path = '../libs/js_engine', optional = true }
|
|
||||||
jsonpath_lib = "0.3.0"
|
|
||||||
lazy_static = "1.4.0"
|
|
||||||
libc = "0.2.126"
|
|
||||||
log = "0.4.17"
|
|
||||||
models = { version = "*", path = "../libs/models" }
|
|
||||||
nix = "0.25.0"
|
|
||||||
nom = "7.1.1"
|
|
||||||
num = "0.4.0"
|
|
||||||
num_enum = "0.5.7"
|
|
||||||
openssh-keys = "0.5.0"
|
|
||||||
openssl = { version = "0.10.41", features = ["vendored"] }
|
|
||||||
patch-db = { version = "*", path = "../patch-db/patch-db", features = [
|
|
||||||
"trace",
|
|
||||||
] }
|
|
||||||
pbkdf2 = "0.11.0"
|
|
||||||
pin-project = "1.0.11"
|
|
||||||
pkcs8 = { version = "0.9.0", features = ["std"] }
|
|
||||||
prettytable-rs = "0.9.0"
|
|
||||||
proptest = "1.0.0"
|
|
||||||
proptest-derive = "0.3.0"
|
|
||||||
rand = { version = "0.8.5", features = ["std"] }
|
|
||||||
rand-old = { package = "rand", version = "0.7.3" }
|
|
||||||
regex = "1.6.0"
|
|
||||||
reqwest = { version = "0.11.11", features = ["stream", "json", "socks"] }
|
|
||||||
reqwest_cookie_store = "0.4.0"
|
|
||||||
rpassword = "7.0.0"
|
|
||||||
rpc-toolkit = "0.2.1"
|
|
||||||
rust-argon2 = "1.0.0"
|
|
||||||
scopeguard = "1.1" # because avahi-sys fucks your shit up
|
|
||||||
serde = { version = "1.0.139", features = ["derive", "rc"] }
|
|
||||||
serde_cbor = { package = "ciborium", version = "0.2.0" }
|
|
||||||
serde_json = "1.0.82"
|
|
||||||
serde_toml = { package = "toml", version = "0.5.9" }
|
|
||||||
serde_with = { version = "2.0.1", features = ["macros", "json"] }
|
|
||||||
serde_yaml = "0.9.11"
|
|
||||||
sha2 = "0.10.2"
|
|
||||||
sha2-old = { package = "sha2", version = "0.9.9" }
|
|
||||||
simple-logging = "2.0.2"
|
|
||||||
sqlx = { version = "0.6.0", features = [
|
|
||||||
"chrono",
|
|
||||||
"offline",
|
|
||||||
"runtime-tokio-rustls",
|
|
||||||
"postgres",
|
|
||||||
] }
|
|
||||||
stderrlog = "0.5.3"
|
|
||||||
tar = "0.4.38"
|
|
||||||
thiserror = "1.0.31"
|
|
||||||
tokio = { version = "1.19.2", features = ["full"] }
|
|
||||||
tokio-stream = { version = "0.1.9", features = ["io-util", "sync"] }
|
|
||||||
tokio-tar = { git = "https://github.com/dr-bonez/tokio-tar.git" }
|
|
||||||
tokio-tungstenite = { version = "0.17.1", features = ["native-tls"] }
|
|
||||||
tokio-util = { version = "0.7.3", features = ["io"] }
|
|
||||||
torut = "0.2.1"
|
|
||||||
tracing = "0.1.35"
|
|
||||||
tracing-error = "0.2.0"
|
|
||||||
tracing-futures = "0.2.5"
|
|
||||||
tracing-subscriber = { version = "0.3.14", features = ["env-filter"] }
|
|
||||||
trust-dns-server = "0.22.0"
|
|
||||||
typed-builder = "0.10.0"
|
|
||||||
url = { version = "2.2.2", features = ["serde"] }
|
|
||||||
uuid = { version = "1.1.2", features = ["v4"] }
|
|
||||||
|
|
||||||
[profile.test]
|
|
||||||
opt-level = 3
|
|
||||||
|
|
||||||
[profile.dev.package.backtrace]
|
|
||||||
opt-level = 3
|
|
||||||
|
|
||||||
[profile.dev.package.sqlx-macros]
|
|
||||||
opt-level = 3
|
|
||||||
@@ -1,35 +0,0 @@
|
|||||||
# embassyOS Backend
|
|
||||||
|
|
||||||
- Requirements:
|
|
||||||
- [Install Rust](https://rustup.rs)
|
|
||||||
- Recommended: [rust-analyzer](https://rust-analyzer.github.io/)
|
|
||||||
- [Docker](https://docs.docker.com/get-docker/)
|
|
||||||
- [Rust ARM64 Build Container](https://github.com/Start9Labs/rust-arm-builder)
|
|
||||||
- Scripts (run within the `./backend` directory)
|
|
||||||
- `build-prod.sh` - compiles a release build of the artifacts for running on ARM64
|
|
||||||
- `build-dev.sh` - compiles a development build of the artifacts for running on ARM64
|
|
||||||
- A Linux computer or VM
|
|
||||||
|
|
||||||
## Structure
|
|
||||||
|
|
||||||
The embassyOS backend is broken up into 4 different binaries:
|
|
||||||
|
|
||||||
- embassyd: This is the main workhorse of embassyOS - any new functionality you want will likely go here
|
|
||||||
- embassy-init: This is the component responsible for allowing you to set up your device, and handles system initialization on startup
|
|
||||||
- embassy-cli: This is a CLI tool that will allow you to issue commands to embassyd and control it similarly to the UI
|
|
||||||
- embassy-sdk: This is a CLI tool that aids in building and packaging services you wish to deploy to the Embassy
|
|
||||||
|
|
||||||
Finally there is a library `embassy` that supports all four of these tools.
|
|
||||||
|
|
||||||
See [here](/backend/Cargo.toml) for details.
|
|
||||||
|
|
||||||
## Building
|
|
||||||
|
|
||||||
You can build the entire operating system image using `make` from the root of the embassyOS project. This will subsequently invoke the build scripts above to actually create the requisite binaries and put them onto the final operating system image.
|
|
||||||
|
|
||||||
## Questions
|
|
||||||
|
|
||||||
If you have questions about how various pieces of the backend system work, open an issue and tag the following people:
|
|
||||||
|
|
||||||
- dr-bonez
|
|
||||||
- ProofOfKeags
|
|
||||||
@@ -1,24 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
set -e
|
|
||||||
shopt -s expand_aliases
|
|
||||||
|
|
||||||
if [ "$0" != "./build-dev.sh" ]; then
|
|
||||||
>&2 echo "Must be run from backend directory"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
USE_TTY=
|
|
||||||
if tty -s; then
|
|
||||||
USE_TTY="-it"
|
|
||||||
fi
|
|
||||||
|
|
||||||
alias 'rust-arm64-builder'='docker run $USE_TTY --rm -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$(pwd)":/home/rust/src start9/rust-arm-cross:aarch64'
|
|
||||||
|
|
||||||
cd ..
|
|
||||||
rust-arm64-builder sh -c "(cd backend && cargo build --locked)"
|
|
||||||
cd backend
|
|
||||||
|
|
||||||
sudo chown -R $USER target
|
|
||||||
sudo chown -R $USER ~/.cargo
|
|
||||||
#rust-arm64-builder aarch64-linux-gnu-strip target/aarch64-unknown-linux-gnu/release/embassyd
|
|
||||||
@@ -1,23 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
set -e
|
|
||||||
shopt -s expand_aliases
|
|
||||||
|
|
||||||
if [ "$0" != "./build-portable-dev.sh" ]; then
|
|
||||||
>&2 echo "Must be run from backend directory"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
USE_TTY=
|
|
||||||
if tty -s; then
|
|
||||||
USE_TTY="-it"
|
|
||||||
fi
|
|
||||||
|
|
||||||
alias 'rust-musl-builder'='docker run $USE_TTY --rm -v "$HOME"/.cargo/registry:/root/.cargo/registry -v "$(pwd)":/home/rust/src start9/rust-musl-cross:x86_64-musl'
|
|
||||||
|
|
||||||
cd ..
|
|
||||||
rust-musl-builder sh -c "(cd backend && cargo +beta build --target=x86_64-unknown-linux-musl --no-default-features --locked)"
|
|
||||||
cd backend
|
|
||||||
|
|
||||||
sudo chown -R $USER target
|
|
||||||
sudo chown -R $USER ~/.cargo
|
|
||||||
@@ -1,23 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
set -e
|
|
||||||
shopt -s expand_aliases
|
|
||||||
|
|
||||||
if [ "$0" != "./build-portable.sh" ]; then
|
|
||||||
>&2 echo "Must be run from backend directory"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
USE_TTY=
|
|
||||||
if tty -s; then
|
|
||||||
USE_TTY="-it"
|
|
||||||
fi
|
|
||||||
|
|
||||||
alias 'rust-musl-builder'='docker run $USE_TTY --rm -v "$HOME"/.cargo/registry:/root/.cargo/registry -v "$(pwd)":/home/rust/src start9/rust-musl-cross:x86_64-musl'
|
|
||||||
|
|
||||||
cd ..
|
|
||||||
rust-musl-builder sh -c "(cd backend && cargo +beta build --release --target=x86_64-unknown-linux-musl --no-default-features --locked)"
|
|
||||||
cd backend
|
|
||||||
|
|
||||||
sudo chown -R $USER target
|
|
||||||
sudo chown -R $USER ~/.cargo
|
|
||||||
@@ -1,37 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
set -e
|
|
||||||
shopt -s expand_aliases
|
|
||||||
|
|
||||||
if [ "$0" != "./build-prod.sh" ]; then
|
|
||||||
>&2 echo "Must be run from backend directory"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
USE_TTY=
|
|
||||||
if tty -s; then
|
|
||||||
USE_TTY="-it"
|
|
||||||
fi
|
|
||||||
|
|
||||||
alias 'rust-arm64-builder'='docker run $USE_TTY --rm -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$(pwd)":/home/rust/src -P start9/rust-arm-cross:aarch64'
|
|
||||||
|
|
||||||
cd ..
|
|
||||||
FLAGS=""
|
|
||||||
if [[ "$ENVIRONMENT" =~ (^|-)unstable($|-) ]]; then
|
|
||||||
FLAGS="unstable,$FLAGS"
|
|
||||||
fi
|
|
||||||
if [[ "$ENVIRONMENT" =~ (^|-)dev($|-) ]]; then
|
|
||||||
FLAGS="dev,$FLAGS"
|
|
||||||
fi
|
|
||||||
if [[ "$FLAGS" = "" ]]; then
|
|
||||||
rust-arm64-builder sh -c "(git config --global --add safe.directory '*'; cd backend && cargo build --release --locked)"
|
|
||||||
else
|
|
||||||
echo "FLAGS=$FLAGS"
|
|
||||||
rust-arm64-builder sh -c "(git config --global --add safe.directory '*'; cd backend && cargo build --release --features $FLAGS --locked)"
|
|
||||||
fi
|
|
||||||
cd backend
|
|
||||||
|
|
||||||
sudo chown -R $USER target
|
|
||||||
sudo chown -R $USER ~/.cargo
|
|
||||||
|
|
||||||
#rust-arm64-builder aarch64-linux-gnu-strip target/aarch64-unknown-linux-gnu/release/embassyd
|
|
||||||
@@ -1,22 +0,0 @@
|
|||||||
[licenses]
|
|
||||||
unlicensed = "warn"
|
|
||||||
allow-osi-fsf-free = "neither"
|
|
||||||
copyleft = "deny"
|
|
||||||
confidence-threshold = 0.93
|
|
||||||
allow = [
|
|
||||||
"Apache-2.0",
|
|
||||||
"Apache-2.0 WITH LLVM-exception",
|
|
||||||
"MIT",
|
|
||||||
"ISC",
|
|
||||||
"MPL-2.0",
|
|
||||||
"CC0-1.0",
|
|
||||||
"BSD-2-Clause",
|
|
||||||
"BSD-3-Clause",
|
|
||||||
"LGPL-3.0",
|
|
||||||
"OpenSSL",
|
|
||||||
]
|
|
||||||
|
|
||||||
clarify = [
|
|
||||||
{ name = "webpki", expression = "ISC", license-files = [ { path = "LICENSE", hash = 0x001c7e6c } ] },
|
|
||||||
{ name = "ring", expression = "OpenSSL", license-files = [ { path = "LICENSE", hash = 0xbd0eed23 } ] },
|
|
||||||
]
|
|
||||||
@@ -1,15 +0,0 @@
|
|||||||
[Unit]
|
|
||||||
Description=Embassy Init
|
|
||||||
After=network.target
|
|
||||||
Requires=network.target
|
|
||||||
Wants=avahi-daemon.service nginx.service tor.service
|
|
||||||
|
|
||||||
[Service]
|
|
||||||
Type=oneshot
|
|
||||||
Environment=RUST_LOG=embassy_init=debug,embassy=debug,js_engine=debug,patch_db=warn
|
|
||||||
ExecStart=/usr/local/bin/embassy-init
|
|
||||||
RemainAfterExit=true
|
|
||||||
StandardOutput=append:/var/log/embassy-init.log
|
|
||||||
|
|
||||||
[Install]
|
|
||||||
WantedBy=embassyd.service
|
|
||||||
@@ -1,17 +0,0 @@
|
|||||||
[Unit]
|
|
||||||
Description=Embassy Daemon
|
|
||||||
After=embassy-init.service
|
|
||||||
Requires=embassy-init.service
|
|
||||||
|
|
||||||
[Service]
|
|
||||||
Type=simple
|
|
||||||
Environment=RUST_LOG=embassyd=debug,embassy=debug,js_engine=debug,patch_db=warn
|
|
||||||
ExecStart=/usr/local/bin/embassyd
|
|
||||||
Restart=always
|
|
||||||
RestartSec=3
|
|
||||||
ManagedOOMPreference=avoid
|
|
||||||
CPUAccounting=true
|
|
||||||
CPUWeight=1000
|
|
||||||
|
|
||||||
[Install]
|
|
||||||
WantedBy=multi-user.target
|
|
||||||
@@ -1,11 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
set -e
|
|
||||||
shopt -s expand_aliases
|
|
||||||
|
|
||||||
if [ "$0" != "./install-sdk.sh" ]; then
|
|
||||||
>&2 echo "Must be run from backend directory"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
cargo install --bin=embassy-sdk --bin=embassy-cli --path=. --no-default-features --features=js_engine --locked
|
|
||||||
@@ -1,810 +0,0 @@
|
|||||||
{
|
|
||||||
"db": "PostgreSQL",
|
|
||||||
"094882d4d46d52e814f9aaf5fae172a5dd745b06cbde347f47b18e6498167269": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Left": [
|
|
||||||
"Text",
|
|
||||||
"Text",
|
|
||||||
"Text"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "UPDATE certificates SET priv_key_pem = $1, certificate_pem = $2, updated_at = now() WHERE lookup_string = $3"
|
|
||||||
},
|
|
||||||
"165daa7d6a60cb42122373b2c5ac7d39399bcc99992f0002ee7bfef50a8daceb": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Left": []
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "DELETE FROM certificates WHERE id = 0 OR id = 1;"
|
|
||||||
},
|
|
||||||
"1f7936d27d63f01118ecd6f824e8a79607ed2b6e6def23f3e2487466dd2ddfe1": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Left": [
|
|
||||||
"Text",
|
|
||||||
"Text",
|
|
||||||
"Text"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "INSERT INTO certificates (priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES ($1, $2, $3, now(), now())"
|
|
||||||
},
|
|
||||||
"21471490cdc3adb206274cc68e1ea745ffa5da4479478c1fd2158a45324b1930": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Left": [
|
|
||||||
"Text"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "DELETE FROM ssh_keys WHERE fingerprint = $1"
|
|
||||||
},
|
|
||||||
"22613628ff50341fdc35366e194fdcd850118824763cfe0dfff68dadc72167e9": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Left": [
|
|
||||||
"Text",
|
|
||||||
"Text",
|
|
||||||
"Bytea"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "INSERT INTO tor (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO UPDATE SET key = $3"
|
|
||||||
},
|
|
||||||
"28ea34bbde836e0618c5fc9bb7c36e463c20c841a7d6a0eb15be0f24f4a928ec": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [
|
|
||||||
{
|
|
||||||
"name": "hostname",
|
|
||||||
"ordinal": 0,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "path",
|
|
||||||
"ordinal": 1,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "username",
|
|
||||||
"ordinal": 2,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "password",
|
|
||||||
"ordinal": 3,
|
|
||||||
"type_info": "Text"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"nullable": [
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
true
|
|
||||||
],
|
|
||||||
"parameters": {
|
|
||||||
"Left": [
|
|
||||||
"Int4"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "SELECT hostname, path, username, password FROM cifs_shares WHERE id = $1"
|
|
||||||
},
|
|
||||||
"3502e58f2ab48fb4566d21c920c096f81acfa3ff0d02f970626a4dcd67bac71d": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [
|
|
||||||
{
|
|
||||||
"name": "tor_key",
|
|
||||||
"ordinal": 0,
|
|
||||||
"type_info": "Bytea"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"nullable": [
|
|
||||||
false
|
|
||||||
],
|
|
||||||
"parameters": {
|
|
||||||
"Left": []
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "SELECT tor_key FROM account"
|
|
||||||
},
|
|
||||||
"4099028a5c0de578255bf54a67cef6cb0f1e9a4e158260700f1639dd4b438997": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [
|
|
||||||
{
|
|
||||||
"name": "fingerprint",
|
|
||||||
"ordinal": 0,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "openssh_pubkey",
|
|
||||||
"ordinal": 1,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "created_at",
|
|
||||||
"ordinal": 2,
|
|
||||||
"type_info": "Text"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"nullable": [
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
false
|
|
||||||
],
|
|
||||||
"parameters": {
|
|
||||||
"Left": [
|
|
||||||
"Text"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "SELECT * FROM ssh_keys WHERE fingerprint = $1"
|
|
||||||
},
|
|
||||||
"46815a4ac2c43e1dfbab3c0017ed09d5c833062e523205db4245a5218b2562b8": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [
|
|
||||||
{
|
|
||||||
"name": "priv_key_pem",
|
|
||||||
"ordinal": 0,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "certificate_pem",
|
|
||||||
"ordinal": 1,
|
|
||||||
"type_info": "Text"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"nullable": [
|
|
||||||
false,
|
|
||||||
false
|
|
||||||
],
|
|
||||||
"parameters": {
|
|
||||||
"Left": [
|
|
||||||
"Text"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "SELECT priv_key_pem, certificate_pem FROM certificates WHERE lookup_string = $1"
|
|
||||||
},
|
|
||||||
"4691e3a2ce80b59009ac17124f54f925f61dc5ea371903e62cdffa5d7b67ca96": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [
|
|
||||||
{
|
|
||||||
"name": "id",
|
|
||||||
"ordinal": 0,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "logged_in",
|
|
||||||
"ordinal": 1,
|
|
||||||
"type_info": "Timestamp"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "logged_out",
|
|
||||||
"ordinal": 2,
|
|
||||||
"type_info": "Timestamp"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "last_active",
|
|
||||||
"ordinal": 3,
|
|
||||||
"type_info": "Timestamp"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "user_agent",
|
|
||||||
"ordinal": 4,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "metadata",
|
|
||||||
"ordinal": 5,
|
|
||||||
"type_info": "Text"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"nullable": [
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
true,
|
|
||||||
false,
|
|
||||||
true,
|
|
||||||
false
|
|
||||||
],
|
|
||||||
"parameters": {
|
|
||||||
"Left": []
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "SELECT * FROM session WHERE logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP"
|
|
||||||
},
|
|
||||||
"4bcfbefb1eb3181343871a1cd7fc3afb81c2be5c681cfa8b4be0ce70610e9c3a": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Left": [
|
|
||||||
"Text"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "UPDATE session SET logged_out = CURRENT_TIMESTAMP WHERE id = $1"
|
|
||||||
},
|
|
||||||
"548448e8ed8bcdf9efdc813d65af2cc55064685293b936f0f09e07f91a328eb9": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [
|
|
||||||
{
|
|
||||||
"name": "setval",
|
|
||||||
"ordinal": 0,
|
|
||||||
"type_info": "Int8"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"nullable": [
|
|
||||||
null
|
|
||||||
],
|
|
||||||
"parameters": {
|
|
||||||
"Left": []
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "SELECT setval('certificates_id_seq', GREATEST(MAX(id) + 1, nextval('certificates_id_seq') - 1)) FROM certificates"
|
|
||||||
},
|
|
||||||
"629be61c3c341c131ddbbff0293a83dbc6afd07cae69d246987f62cf0cc35c2a": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [
|
|
||||||
{
|
|
||||||
"name": "password",
|
|
||||||
"ordinal": 0,
|
|
||||||
"type_info": "Text"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"nullable": [
|
|
||||||
false
|
|
||||||
],
|
|
||||||
"parameters": {
|
|
||||||
"Left": []
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "SELECT password FROM account"
|
|
||||||
},
|
|
||||||
"687688055e63d27123cdc89a5bbbd8361776290a9411d527eaf1fdb40bef399d": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [
|
|
||||||
{
|
|
||||||
"name": "key",
|
|
||||||
"ordinal": 0,
|
|
||||||
"type_info": "Bytea"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"nullable": [
|
|
||||||
false
|
|
||||||
],
|
|
||||||
"parameters": {
|
|
||||||
"Left": [
|
|
||||||
"Text",
|
|
||||||
"Text"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "SELECT key FROM tor WHERE package = $1 AND interface = $2"
|
|
||||||
},
|
|
||||||
"6c96d76bffcc5f03290d8d8544a58521345ed2a843a509b17bbcd6257bb81821": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [
|
|
||||||
{
|
|
||||||
"name": "priv_key_pem",
|
|
||||||
"ordinal": 0,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "certificate_pem",
|
|
||||||
"ordinal": 1,
|
|
||||||
"type_info": "Text"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"nullable": [
|
|
||||||
false,
|
|
||||||
false
|
|
||||||
],
|
|
||||||
"parameters": {
|
|
||||||
"Left": []
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "SELECT priv_key_pem, certificate_pem FROM certificates WHERE id = 1;"
|
|
||||||
},
|
|
||||||
"6d35ccf780fb2bb62586dd1d3df9c1550a41ee580dad3f49d35cb843ebef10ca": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Left": [
|
|
||||||
"Text"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "UPDATE session SET last_active = CURRENT_TIMESTAMP WHERE id = $1 AND logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP"
|
|
||||||
},
|
|
||||||
"7b64f032d507e8ffe37c41f4c7ad514a66c421a11ab04c26d89a7aa8f6b67210": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [
|
|
||||||
{
|
|
||||||
"name": "id",
|
|
||||||
"ordinal": 0,
|
|
||||||
"type_info": "Int4"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "package_id",
|
|
||||||
"ordinal": 1,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "created_at",
|
|
||||||
"ordinal": 2,
|
|
||||||
"type_info": "Timestamp"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "code",
|
|
||||||
"ordinal": 3,
|
|
||||||
"type_info": "Int4"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "level",
|
|
||||||
"ordinal": 4,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "title",
|
|
||||||
"ordinal": 5,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "message",
|
|
||||||
"ordinal": 6,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "data",
|
|
||||||
"ordinal": 7,
|
|
||||||
"type_info": "Text"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"nullable": [
|
|
||||||
false,
|
|
||||||
true,
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
true
|
|
||||||
],
|
|
||||||
"parameters": {
|
|
||||||
"Left": [
|
|
||||||
"Int4",
|
|
||||||
"Int8"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "SELECT id, package_id, created_at, code, level, title, message, data FROM notifications WHERE id < $1 ORDER BY id DESC LIMIT $2"
|
|
||||||
},
|
|
||||||
"7e0649d839927e57fa03ee51a2c9f96a8bdb0fc97ee8a3c6df1069e1e2b98576": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Left": [
|
|
||||||
"Text"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "DELETE FROM tor WHERE package = $1"
|
|
||||||
},
|
|
||||||
"8951b9126fbf60dbb5997241e11e3526b70bccf3e407327917294a993bc17ed5": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Left": [
|
|
||||||
"Text",
|
|
||||||
"Text",
|
|
||||||
"Bytea"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "INSERT INTO tor (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO NOTHING"
|
|
||||||
},
|
|
||||||
"94d471bb374b4965c6cbedf8c17bbf6bea226d38efaf6559923c79a36d5ca08c": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [
|
|
||||||
{
|
|
||||||
"name": "id",
|
|
||||||
"ordinal": 0,
|
|
||||||
"type_info": "Int4"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "package_id",
|
|
||||||
"ordinal": 1,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "created_at",
|
|
||||||
"ordinal": 2,
|
|
||||||
"type_info": "Timestamp"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "code",
|
|
||||||
"ordinal": 3,
|
|
||||||
"type_info": "Int4"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "level",
|
|
||||||
"ordinal": 4,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "title",
|
|
||||||
"ordinal": 5,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "message",
|
|
||||||
"ordinal": 6,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "data",
|
|
||||||
"ordinal": 7,
|
|
||||||
"type_info": "Text"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"nullable": [
|
|
||||||
false,
|
|
||||||
true,
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
true
|
|
||||||
],
|
|
||||||
"parameters": {
|
|
||||||
"Left": [
|
|
||||||
"Int8"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "SELECT id, package_id, created_at, code, level, title, message, data FROM notifications ORDER BY id DESC LIMIT $1"
|
|
||||||
},
|
|
||||||
"95c4ab4c645f3302568c6ff13d85ab58252362694cf0f56999bf60194d20583a": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [
|
|
||||||
{
|
|
||||||
"name": "id",
|
|
||||||
"ordinal": 0,
|
|
||||||
"type_info": "Int4"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "hostname",
|
|
||||||
"ordinal": 1,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "path",
|
|
||||||
"ordinal": 2,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "username",
|
|
||||||
"ordinal": 3,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "password",
|
|
||||||
"ordinal": 4,
|
|
||||||
"type_info": "Text"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"nullable": [
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
true
|
|
||||||
],
|
|
||||||
"parameters": {
|
|
||||||
"Left": []
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "SELECT id, hostname, path, username, password FROM cifs_shares"
|
|
||||||
},
|
|
||||||
"a60d6e66719325b08dc4ecfacaf337527233c84eee758ac9be967906e5841d27": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Left": [
|
|
||||||
"Int4"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "DELETE FROM cifs_shares WHERE id = $1"
|
|
||||||
},
|
|
||||||
"a645d636be810a4ba61dcadf22e90de6e9baf3614aa9e97f053ff480cb3118a2": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Left": [
|
|
||||||
"Text",
|
|
||||||
"Bytea"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "INSERT INTO tor (package, interface, key) VALUES ($1, 'main', $2) ON CONFLICT (package, interface) DO UPDATE SET key = $2"
|
|
||||||
},
|
|
||||||
"a6645d91f76b3d5fac2191ea3bec5dab7d7d124715fde02e6a816fa5dbc7acf2": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Left": [
|
|
||||||
"Int4",
|
|
||||||
"Text",
|
|
||||||
"Bytea"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "INSERT INTO account (id, password, tor_key) VALUES ($1, $2, $3) ON CONFLICT (id) DO UPDATE SET password = $2, tor_key = $3"
|
|
||||||
},
|
|
||||||
"a6b0c8909a3a5d6d9156aebfb359424e6b5a1d1402e028219e21726f1ebd282e": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [
|
|
||||||
{
|
|
||||||
"name": "fingerprint",
|
|
||||||
"ordinal": 0,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "openssh_pubkey",
|
|
||||||
"ordinal": 1,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "created_at",
|
|
||||||
"ordinal": 2,
|
|
||||||
"type_info": "Text"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"nullable": [
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
false
|
|
||||||
],
|
|
||||||
"parameters": {
|
|
||||||
"Left": []
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "SELECT fingerprint, openssh_pubkey, created_at FROM ssh_keys"
|
|
||||||
},
|
|
||||||
"b1147beaaabbed89f2ab8c1e13ec4393a9a8fde2833cf096af766a979d94dee6": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Left": [
|
|
||||||
"Text",
|
|
||||||
"Text",
|
|
||||||
"Text",
|
|
||||||
"Text",
|
|
||||||
"Int4"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "UPDATE cifs_shares SET hostname = $1, path = $2, username = $3, password = $4 WHERE id = $5"
|
|
||||||
},
|
|
||||||
"cec8112be0ebc02ef7e651631be09efe26d1677b5b8aa95ceb3a92aff1afdbcc": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Left": [
|
|
||||||
"Text",
|
|
||||||
"Text"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "INSERT INTO certificates (id, priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES (1, $1, $2, NULL, now(), now())"
|
|
||||||
},
|
|
||||||
"d5117054072476377f3c4f040ea429d4c9b2cf534e76f35c80a2bf60e8599cca": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [
|
|
||||||
{
|
|
||||||
"name": "openssh_pubkey",
|
|
||||||
"ordinal": 0,
|
|
||||||
"type_info": "Text"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"nullable": [
|
|
||||||
false
|
|
||||||
],
|
|
||||||
"parameters": {
|
|
||||||
"Left": []
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "SELECT openssh_pubkey FROM ssh_keys"
|
|
||||||
},
|
|
||||||
"da71f94b29798d1738d2b10b9a721ea72db8cfb362e7181c8226d9297507c62b": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Left": [
|
|
||||||
"Text",
|
|
||||||
"Int4",
|
|
||||||
"Text",
|
|
||||||
"Text",
|
|
||||||
"Text",
|
|
||||||
"Text"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "INSERT INTO notifications (package_id, code, level, title, message, data) VALUES ($1, $2, $3, $4, $5, $6)"
|
|
||||||
},
|
|
||||||
"df4428ccb891bd791824bcd7990550cc9651e1cfaab1db33833ff7959d113b2c": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Left": [
|
|
||||||
"Text",
|
|
||||||
"Text"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "INSERT INTO certificates (id, priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES (0, $1, $2, NULL, now(), now())"
|
|
||||||
},
|
|
||||||
"e185203cf84e43b801dfb23b4159e34aeaef1154dcd3d6811ab504915497ccf7": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Left": [
|
|
||||||
"Int4"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "DELETE FROM notifications WHERE id = $1"
|
|
||||||
},
|
|
||||||
"e25e53c45c5a494a45cdb4d145de507df6f322ac6706e71b86895f1c64195f41": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Left": [
|
|
||||||
"Text"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "UPDATE account SET password = $1"
|
|
||||||
},
|
|
||||||
"e5843c5b0e7819b29aa1abf2266799bd4f82e761837b526a0972c3d4439a264d": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Left": [
|
|
||||||
"Text",
|
|
||||||
"Text",
|
|
||||||
"Text"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "INSERT INTO session (id, user_agent, metadata) VALUES ($1, $2, $3)"
|
|
||||||
},
|
|
||||||
"e85749336fce4afaf16627bee8cfcb70be6f189ea7d1f05f9a26bead4be11839": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [
|
|
||||||
{
|
|
||||||
"name": "interface",
|
|
||||||
"ordinal": 0,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "key",
|
|
||||||
"ordinal": 1,
|
|
||||||
"type_info": "Bytea"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"nullable": [
|
|
||||||
false,
|
|
||||||
false
|
|
||||||
],
|
|
||||||
"parameters": {
|
|
||||||
"Left": [
|
|
||||||
"Text"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "SELECT interface, key FROM tor WHERE package = $1"
|
|
||||||
},
|
|
||||||
"eb750adaa305bdbf3c5b70aaf59139c7b7569602adb58f2d6b3a94da4f167b0a": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Left": [
|
|
||||||
"Int4"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "DELETE FROM notifications WHERE id < $1"
|
|
||||||
},
|
|
||||||
"ecc765d8205c0876956f95f76944ac6a5f34dd820c4073b7728c7067aab9fded": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [
|
|
||||||
{
|
|
||||||
"name": "id",
|
|
||||||
"ordinal": 0,
|
|
||||||
"type_info": "Int4"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"nullable": [
|
|
||||||
false
|
|
||||||
],
|
|
||||||
"parameters": {
|
|
||||||
"Left": [
|
|
||||||
"Text",
|
|
||||||
"Text",
|
|
||||||
"Text",
|
|
||||||
"Text"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "INSERT INTO cifs_shares (hostname, path, username, password) VALUES ($1, $2, $3, $4) RETURNING id"
|
|
||||||
},
|
|
||||||
"ed848affa5bf92997cd441e3a50b3616b6724df3884bd9d199b3225e0bea8a54": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [
|
|
||||||
{
|
|
||||||
"name": "priv_key_pem",
|
|
||||||
"ordinal": 0,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "certificate_pem",
|
|
||||||
"ordinal": 1,
|
|
||||||
"type_info": "Text"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"nullable": [
|
|
||||||
false,
|
|
||||||
false
|
|
||||||
],
|
|
||||||
"parameters": {
|
|
||||||
"Left": []
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "SELECT priv_key_pem, certificate_pem FROM certificates WHERE id = 0;"
|
|
||||||
},
|
|
||||||
"f6d1c5ef0f9d9577bea8382318967b9deb46da75788c7fe6082b43821c22d556": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Left": [
|
|
||||||
"Text",
|
|
||||||
"Text",
|
|
||||||
"Text"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "INSERT INTO ssh_keys (fingerprint, openssh_pubkey, created_at) VALUES ($1, $2, $3)"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,161 +0,0 @@
|
|||||||
use std::collections::{BTreeMap, BTreeSet};
|
|
||||||
|
|
||||||
use clap::ArgMatches;
|
|
||||||
use color_eyre::eyre::eyre;
|
|
||||||
use indexmap::IndexSet;
|
|
||||||
pub use models::ActionId;
|
|
||||||
use rpc_toolkit::command;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use crate::config::{Config, ConfigSpec};
|
|
||||||
use crate::context::RpcContext;
|
|
||||||
use crate::id::ImageId;
|
|
||||||
use crate::procedure::{PackageProcedure, ProcedureName};
|
|
||||||
use crate::s9pk::manifest::PackageId;
|
|
||||||
use crate::util::serde::{display_serializable, parse_stdin_deserializable, IoFormat};
|
|
||||||
use crate::util::Version;
|
|
||||||
use crate::volume::Volumes;
|
|
||||||
use crate::{Error, ResultExt};
|
|
||||||
/// All actions a package exposes, keyed by their [`ActionId`].
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
pub struct Actions(pub BTreeMap<ActionId, Action>);
|
|
||||||
|
|
||||||
/// Versioned result payload returned by an action.
///
/// Serialized with an internal `"version"` tag selecting the variant, so the
/// result schema can evolve without breaking existing consumers.
#[derive(Debug, Serialize, Deserialize)]
#[serde(tag = "version")]
pub enum ActionResult {
    #[serde(rename = "0")]
    V0(ActionResultV0),
}
|
|
||||||
|
|
||||||
/// Version-0 action result schema.
#[derive(Debug, Serialize, Deserialize)]
pub struct ActionResultV0 {
    // Human-readable outcome message (printed by the CLI display function).
    pub message: String,
    // Optional value produced by the action; serialized as JSON when displayed.
    pub value: Option<String>,
    // Presentation hints — presumably for a UI consumer (copy button / QR
    // rendering of `value`); not consumed anywhere in this file. TODO confirm.
    pub copyable: bool,
    pub qr: bool,
}
|
|
||||||
|
|
||||||
/// Container run-state in which an action is permitted to execute
/// (see [`Action::allowed_statuses`]).
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub enum DockerStatus {
    Running,
    Stopped,
}
|
|
||||||
|
|
||||||
/// A single user-invocable action declared in a package manifest.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct Action {
    // Display name, also used in validation error messages.
    pub name: String,
    pub description: String,
    #[serde(default)]
    pub warning: Option<String>,
    // The procedure (e.g. docker invocation) run when the action executes.
    pub implementation: PackageProcedure,
    // Container states in which this action may be run.
    pub allowed_statuses: IndexSet<DockerStatus>,
    // Schema the optional `input` config must satisfy before execution.
    #[serde(default)]
    pub input_spec: ConfigSpec,
}
|
|
||||||
impl Action {
|
|
||||||
#[instrument]
|
|
||||||
pub fn validate(
|
|
||||||
&self,
|
|
||||||
eos_version: &Version,
|
|
||||||
volumes: &Volumes,
|
|
||||||
image_ids: &BTreeSet<ImageId>,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
self.implementation
|
|
||||||
.validate(eos_version, volumes, image_ids, true)
|
|
||||||
.with_ctx(|_| {
|
|
||||||
(
|
|
||||||
crate::ErrorKind::ValidateS9pk,
|
|
||||||
format!("Action {}", self.name),
|
|
||||||
)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(skip(ctx))]
|
|
||||||
pub async fn execute(
|
|
||||||
&self,
|
|
||||||
ctx: &RpcContext,
|
|
||||||
pkg_id: &PackageId,
|
|
||||||
pkg_version: &Version,
|
|
||||||
action_id: &ActionId,
|
|
||||||
volumes: &Volumes,
|
|
||||||
input: Option<Config>,
|
|
||||||
) -> Result<ActionResult, Error> {
|
|
||||||
if let Some(ref input) = input {
|
|
||||||
self.input_spec
|
|
||||||
.matches(&input)
|
|
||||||
.with_kind(crate::ErrorKind::ConfigSpecViolation)?;
|
|
||||||
}
|
|
||||||
self.implementation
|
|
||||||
.execute(
|
|
||||||
ctx,
|
|
||||||
pkg_id,
|
|
||||||
pkg_version,
|
|
||||||
ProcedureName::Action(action_id.clone()),
|
|
||||||
volumes,
|
|
||||||
input,
|
|
||||||
true,
|
|
||||||
None,
|
|
||||||
)
|
|
||||||
.await?
|
|
||||||
.map_err(|e| Error::new(eyre!("{}", e.1), crate::ErrorKind::Action))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn display_action_result(action_result: ActionResult, matches: &ArgMatches) {
|
|
||||||
if matches.is_present("format") {
|
|
||||||
return display_serializable(action_result, matches);
|
|
||||||
}
|
|
||||||
match action_result {
|
|
||||||
ActionResult::V0(ar) => {
|
|
||||||
println!(
|
|
||||||
"{}: {}",
|
|
||||||
ar.message,
|
|
||||||
serde_json::to_string(&ar.value).unwrap()
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// RPC/CLI command: executes a named action of an installed package.
///
/// Looks up the package's manifest in the database (erroring with `NotFound`
/// if the package is missing or not installed), finds `action_id` in the
/// manifest's action map, and delegates to [`Action::execute`].
#[command(about = "Executes an action", display(display_action_result))]
#[instrument(skip(ctx))]
pub async fn action(
    #[context] ctx: RpcContext,
    #[arg(rename = "id")] pkg_id: PackageId,
    #[arg(rename = "action-id")] action_id: ActionId,
    #[arg(stdin, parse(parse_stdin_deserializable))] input: Option<Config>,
    // `format` is consumed by display_action_result, not by this function.
    #[allow(unused_variables)]
    #[arg(long = "format")]
    format: Option<IoFormat>,
) -> Result<ActionResult, Error> {
    let mut db = ctx.db.handle();
    let manifest = crate::db::DatabaseModel::new()
        .package_data()
        .idx_model(&pkg_id)
        .and_then(|p| p.installed())
        .expect(&mut db)
        .await
        .with_kind(crate::ErrorKind::NotFound)?
        .manifest()
        .get(&mut db, true)
        .await?
        .to_owned();
    if let Some(action) = manifest.actions.0.get(&action_id) {
        action
            .execute(
                &ctx,
                &manifest.id,
                &manifest.version,
                &action_id,
                &manifest.volumes,
                input,
            )
            .await
    } else {
        Err(Error::new(
            eyre!("Action not found in manifest"),
            crate::ErrorKind::NotFound,
        ))
    }
}
|
|
||||||
@@ -1,373 +0,0 @@
|
|||||||
use std::collections::BTreeMap;
|
|
||||||
use std::marker::PhantomData;
|
|
||||||
|
|
||||||
use chrono::{DateTime, Utc};
|
|
||||||
use clap::ArgMatches;
|
|
||||||
use color_eyre::eyre::eyre;
|
|
||||||
use patch_db::{DbHandle, LockReceipt};
|
|
||||||
use rpc_toolkit::command;
|
|
||||||
use rpc_toolkit::command_helpers::prelude::{RequestParts, ResponseParts};
|
|
||||||
use rpc_toolkit::yajrc::RpcError;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use serde_json::Value;
|
|
||||||
use sqlx::{Executor, Postgres};
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use crate::context::{CliContext, RpcContext};
|
|
||||||
use crate::middleware::auth::{AsLogoutSessionId, HasLoggedOutSessions, HashSessionToken};
|
|
||||||
use crate::util::display_none;
|
|
||||||
use crate::util::serde::{display_serializable, IoFormat};
|
|
||||||
use crate::{ensure_code, Error, ResultExt};
|
|
||||||
|
|
||||||
/// Parent `auth` command: only groups the subcommands, performs no work itself.
#[command(subcommands(login, logout, session, reset_password))]
pub fn auth() -> Result<(), Error> {
    Ok(())
}
|
|
||||||
|
|
||||||
pub fn cli_metadata() -> Value {
|
|
||||||
serde_json::json!({
|
|
||||||
"platforms": ["cli"],
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// clap value parser for the `metadata` argument: ignores whatever the user
/// typed and always substitutes [`cli_metadata`] (the CLI cannot override it).
pub fn parse_metadata(_: &str, _: &ArgMatches) -> Result<Value, Error> {
    Ok(cli_metadata())
}
|
|
||||||
|
|
||||||
/// Developer utility disguised as a test: prints an argon2-encoded hash of
/// "testing1234" with a random 16-byte salt (e.g. for seeding an account
/// password by hand). Makes no assertions.
#[test]
fn gen_pwd() {
    println!(
        "{:?}",
        argon2::hash_encoded(
            b"testing1234",
            &rand::random::<[u8; 16]>()[..],
            &argon2::Config::default()
        )
        .unwrap()
    )
}
|
|
||||||
|
|
||||||
#[instrument(skip(ctx, password))]
|
|
||||||
async fn cli_login(
|
|
||||||
ctx: CliContext,
|
|
||||||
password: Option<String>,
|
|
||||||
metadata: Value,
|
|
||||||
) -> Result<(), RpcError> {
|
|
||||||
let password = if let Some(password) = password {
|
|
||||||
password
|
|
||||||
} else {
|
|
||||||
rpassword::prompt_password("Password: ")?
|
|
||||||
};
|
|
||||||
|
|
||||||
rpc_toolkit::command_helpers::call_remote(
|
|
||||||
ctx,
|
|
||||||
"auth.login",
|
|
||||||
serde_json::json!({ "password": password, "metadata": metadata }),
|
|
||||||
PhantomData::<()>,
|
|
||||||
)
|
|
||||||
.await?
|
|
||||||
.result?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Verifies `password` against an argon2-encoded `hash`.
///
/// # Errors
/// Returns [`crate::ErrorKind::IncorrectPassword`] both when verification
/// yields `false` and when the encoded hash cannot be parsed at all — the two
/// cases are deliberately indistinguishable to the caller.
pub fn check_password(hash: &str, password: &str) -> Result<(), Error> {
    ensure_code!(
        // `hash` is already `&str`; the previous `&hash` passed a needless
        // `&&str` (clippy::needless_borrow).
        argon2::verify_encoded(hash, password.as_bytes()).map_err(|_| {
            Error::new(
                eyre!("Password Incorrect"),
                crate::ErrorKind::IncorrectPassword,
            )
        })?,
        crate::ErrorKind::IncorrectPassword,
        "Password Incorrect"
    );
    Ok(())
}
|
|
||||||
|
|
||||||
/// Fetches the stored account password hash from the secrets database and
/// verifies `password` against it via [`check_password`].
///
/// Generic over any sqlx Postgres executor (pool connection or transaction).
/// NOTE: `account` is assumed to hold exactly one row — `fetch_one` errors
/// otherwise.
pub async fn check_password_against_db<Ex>(secrets: &mut Ex, password: &str) -> Result<(), Error>
where
    for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,
{
    let pw_hash = sqlx::query!("SELECT password FROM account")
        .fetch_one(secrets)
        .await?
        .password;
    check_password(&pw_hash, password)?;
    Ok(())
}
|
|
||||||
|
|
||||||
/// RPC command `auth.login`: verifies the password against the database,
/// creates a new session row, and returns the session token to the client via
/// a `set-cookie` response header.
#[command(
    custom_cli(cli_login(async, context(CliContext))),
    display(display_none),
    metadata(authenticated = false)
)]
#[instrument(skip(ctx, password))]
pub async fn login(
    #[context] ctx: RpcContext,
    #[request] req: &RequestParts,
    #[response] res: &mut ResponseParts,
    #[arg] password: Option<String>,
    #[arg(
        parse(parse_metadata),
        default = "cli_metadata",
        help = "RPC Only: This value cannot be overidden from the cli"
    )]
    metadata: Value,
) -> Result<(), Error> {
    // Missing password is treated as the empty string and will simply fail
    // verification below.
    let password = password.unwrap_or_default();
    let mut handle = ctx.secret_store.acquire().await?;
    check_password_against_db(&mut handle, &password).await?;

    let hash_token = HashSessionToken::new();
    let user_agent = req.headers.get("user-agent").and_then(|h| h.to_str().ok());
    let metadata = serde_json::to_string(&metadata).with_kind(crate::ErrorKind::Database)?;
    // Only the hash of the token is persisted; the raw token goes to the client.
    let hash_token_hashed = hash_token.hashed();
    sqlx::query!(
        "INSERT INTO session (id, user_agent, metadata) VALUES ($1, $2, $3)",
        hash_token_hashed,
        user_agent,
        metadata,
    )
    .execute(&mut handle)
    .await?;
    res.headers.insert(
        "set-cookie",
        hash_token.header_value()?, // Should be impossible, but don't want to panic
    );

    Ok(())
}
|
|
||||||
|
|
||||||
#[command(display(display_none), metadata(authenticated = false))]
|
|
||||||
#[instrument(skip(ctx))]
|
|
||||||
pub async fn logout(
|
|
||||||
#[context] ctx: RpcContext,
|
|
||||||
#[request] req: &RequestParts,
|
|
||||||
) -> Result<Option<HasLoggedOutSessions>, Error> {
|
|
||||||
let auth = match HashSessionToken::from_request_parts(req) {
|
|
||||||
Err(_) => return Ok(None),
|
|
||||||
Ok(a) => a,
|
|
||||||
};
|
|
||||||
Ok(Some(HasLoggedOutSessions::new(vec![auth], &ctx).await?))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// One authenticated session, as serialized for the `session.list` RPC.
#[derive(Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct Session {
    // When the session was created (login time).
    logged_in: DateTime<Utc>,
    // Last time this session made an authenticated request.
    last_active: DateTime<Utc>,
    // Raw `User-Agent` header captured at login, if present.
    user_agent: Option<String>,
    // Arbitrary client-supplied JSON metadata recorded at login.
    metadata: Value,
}
|
|
||||||
|
|
||||||
/// Result of the `session.list` RPC: all live sessions keyed by session id,
/// plus the id of the session that made the request.
#[derive(Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct SessionList {
    // Session id (hash) of the caller's own session.
    current: String,
    // All non-logged-out sessions, keyed by session id.
    sessions: BTreeMap<String, Session>,
}
|
|
||||||
|
|
||||||
/// Parent command for the `session` subcommands (`list`, `kill`).
/// Does nothing by itself.
#[command(subcommands(list, kill))]
pub async fn session() -> Result<(), Error> {
    Ok(())
}
|
|
||||||
|
|
||||||
fn display_sessions(arg: SessionList, matches: &ArgMatches) {
|
|
||||||
use prettytable::*;
|
|
||||||
|
|
||||||
if matches.is_present("format") {
|
|
||||||
return display_serializable(arg, matches);
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut table = Table::new();
|
|
||||||
table.add_row(row![bc =>
|
|
||||||
"ID",
|
|
||||||
"LOGGED IN",
|
|
||||||
"LAST ACTIVE",
|
|
||||||
"USER AGENT",
|
|
||||||
"METADATA",
|
|
||||||
]);
|
|
||||||
for (id, session) in arg.sessions {
|
|
||||||
let mut row = row![
|
|
||||||
&id,
|
|
||||||
&format!("{}", session.logged_in),
|
|
||||||
&format!("{}", session.last_active),
|
|
||||||
session.user_agent.as_deref().unwrap_or("N/A"),
|
|
||||||
&format!("{}", session.metadata),
|
|
||||||
];
|
|
||||||
if id == arg.current {
|
|
||||||
row.iter_mut()
|
|
||||||
.map(|c| c.style(Attr::ForegroundColor(color::GREEN)))
|
|
||||||
.collect::<()>()
|
|
||||||
}
|
|
||||||
table.add_row(row);
|
|
||||||
}
|
|
||||||
table.print_tty(false).unwrap();
|
|
||||||
}
|
|
||||||
|
|
||||||
/// RPC `session.list`: returns every session that has not been logged out,
/// keyed by session id, along with the id of the calling session.
#[command(display(display_sessions))]
#[instrument(skip(ctx))]
pub async fn list(
    #[context] ctx: RpcContext,
    #[request] req: &RequestParts,
    // Consumed by the CLI display hook, not by this handler.
    #[allow(unused_variables)]
    #[arg(long = "format")]
    format: Option<IoFormat>,
) -> Result<SessionList, Error> {
    Ok(SessionList {
        current: HashSessionToken::from_request_parts(req)?.as_hash(),
        sessions: sqlx::query!(
            "SELECT * FROM session WHERE logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP"
        )
        .fetch_all(&mut ctx.secret_store.acquire().await?)
        .await?
        .into_iter()
        .map(|row| {
            Ok((
                row.id,
                Session {
                    // DB stores naive timestamps; re-attach the UTC zone.
                    logged_in: DateTime::from_utc(row.logged_in, Utc),
                    last_active: DateTime::from_utc(row.last_active, Utc),
                    user_agent: row.user_agent,
                    // Metadata is stored as a JSON string; parse it back out.
                    metadata: serde_json::from_str(&row.metadata)
                        .with_kind(crate::ErrorKind::Database)?,
                },
            ))
        })
        // Short-circuits on the first row that fails to parse.
        .collect::<Result<_, Error>>()?,
    })
}
|
|
||||||
|
|
||||||
/// CLI arg parser: splits a comma-separated argument into trimmed strings.
fn parse_comma_separated(arg: &str, _: &ArgMatches) -> Result<Vec<String>, RpcError> {
    // `split(',')` with a char pattern instead of `split(",")`: identical
    // behavior, idiomatic and marginally cheaper (clippy::single_char_pattern).
    Ok(arg.split(',').map(|s| s.trim().to_owned()).collect())
}
|
|
||||||
|
|
||||||
/// Newtype marking a raw session-id string as a logout target for
/// `session.kill`.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct KillSessionId(String);
|
|
||||||
|
|
||||||
impl AsLogoutSessionId for KillSessionId {
    /// The wrapped string already is the session id to log out.
    fn as_logout_session_id(self) -> String {
        self.0
    }
}
|
|
||||||
|
|
||||||
/// RPC `session.kill`: logs out the sessions with the given ids.
/// On the CLI, ids are passed comma-separated (see `parse_comma_separated`).
#[command(display(display_none))]
#[instrument(skip(ctx))]
pub async fn kill(
    #[context] ctx: RpcContext,
    #[arg(parse(parse_comma_separated))] ids: Vec<String>,
) -> Result<(), Error> {
    HasLoggedOutSessions::new(ids.into_iter().map(KillSessionId), &ctx).await?;
    Ok(())
}
|
|
||||||
|
|
||||||
/// CLI-side handler for `auth.reset-password`: interactively prompts for any
/// password not supplied as an argument (with a confirmation prompt for the
/// new password), then forwards the request to the server over RPC.
#[instrument(skip(ctx, old_password, new_password))]
async fn cli_reset_password(
    ctx: CliContext,
    old_password: Option<String>,
    new_password: Option<String>,
) -> Result<(), RpcError> {
    let old_password = if let Some(old_password) = old_password {
        old_password
    } else {
        rpassword::prompt_password("Current Password: ")?
    };

    let new_password = if let Some(new_password) = new_password {
        new_password
    } else {
        let new_password = rpassword::prompt_password("New Password: ")?;
        // When prompting, require the new password to be typed twice.
        if new_password != rpassword::prompt_password("Confirm: ")? {
            return Err(Error::new(
                eyre!("Passwords do not match"),
                crate::ErrorKind::IncorrectPassword,
            )
            .into());
        }
        new_password
    };

    rpc_toolkit::command_helpers::call_remote(
        ctx,
        "auth.reset-password",
        serde_json::json!({ "old-password": old_password, "new-password": new_password }),
        PhantomData::<()>,
    )
    .await?
    .result?;

    Ok(())
}
|
|
||||||
|
|
||||||
/// Receipt proving a write lock is held on `server-info.password-hash` in
/// patch-db; required by `set_password`.
pub struct SetPasswordReceipt(LockReceipt<String, ()>);
impl SetPasswordReceipt {
    /// Acquires the required lock on its own and returns the receipt.
    pub async fn new<Db: DbHandle>(db: &mut Db) -> Result<Self, Error> {
        let mut locks = Vec::new();

        let setup = Self::setup(&mut locks);
        Ok(setup(&db.lock_all(locks).await?)?)
    }

    /// Registers the lock target in `locks` and returns a verifier closure,
    /// so this receipt can be acquired as part of a larger batched
    /// `lock_all` call alongside other receipts.
    pub fn setup(
        locks: &mut Vec<patch_db::LockTargetId>,
    ) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
        let password_hash = crate::db::DatabaseModel::new()
            .server_info()
            .password_hash()
            .make_locker(patch_db::LockType::Write)
            .add_to_keys(locks);
        move |skeleton_key| Ok(Self(password_hash.verify(skeleton_key)?))
    }
}
|
|
||||||
|
|
||||||
/// Hashes `password` with argon2 (fresh random 16-byte salt, default config)
/// and writes the encoded hash to both the secret store (`account` table)
/// and patch-db (`server-info.password-hash`, via the held lock receipt).
///
/// NOTE(review): the two writes are not performed atomically — if the
/// patch-db write fails after the SQL update, the stores disagree. Confirm
/// callers tolerate this.
pub async fn set_password<Db: DbHandle, Ex>(
    db: &mut Db,
    receipt: &SetPasswordReceipt,
    secrets: &mut Ex,
    password: &str,
) -> Result<(), Error>
where
    for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,
{
    // Shadowing: `password` becomes the encoded hash from here on.
    let password = argon2::hash_encoded(
        password.as_bytes(),
        &rand::random::<[u8; 16]>()[..],
        &argon2::Config::default(),
    )
    .with_kind(crate::ErrorKind::PasswordHashGeneration)?;

    sqlx::query!("UPDATE account SET password = $1", password,)
        .execute(secrets)
        .await?;

    receipt.0.set(db, password).await?;

    Ok(())
}
|
|
||||||
|
|
||||||
/// RPC `auth.reset-password`: verifies the old password against the secret
/// store, then replaces it with the new one in both stores.
#[command(
    rename = "reset-password",
    custom_cli(cli_reset_password(async, context(CliContext))),
    display(display_none)
)]
#[instrument(skip(ctx, old_password, new_password))]
pub async fn reset_password(
    #[context] ctx: RpcContext,
    #[arg(rename = "old-password")] old_password: Option<String>,
    #[arg(rename = "new-password")] new_password: Option<String>,
) -> Result<(), Error> {
    // Missing args are treated as empty strings (the CLI side prompts
    // interactively instead — see `cli_reset_password`).
    let old_password = old_password.unwrap_or_default();
    let new_password = new_password.unwrap_or_default();

    let mut secrets = ctx.secret_store.acquire().await?;
    check_password_against_db(&mut secrets, &old_password).await?;

    let mut db = ctx.db.handle();

    // Acquires the patch-db write lock on the password hash.
    let set_password_receipt = SetPasswordReceipt::new(&mut db).await?;

    set_password(&mut db, &set_password_receipt, &mut secrets, &new_password).await?;

    Ok(())
}
|
|
||||||
@@ -1,466 +0,0 @@
|
|||||||
use std::collections::{BTreeMap, BTreeSet};
|
|
||||||
use std::path::PathBuf;
|
|
||||||
|
|
||||||
use chrono::Utc;
|
|
||||||
use clap::ArgMatches;
|
|
||||||
use color_eyre::eyre::eyre;
|
|
||||||
use helpers::AtomicFile;
|
|
||||||
use openssl::pkey::{PKey, Private};
|
|
||||||
use openssl::x509::X509;
|
|
||||||
use patch_db::{DbHandle, LockType, PatchDbHandle};
|
|
||||||
use rpc_toolkit::command;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use serde_json::Value;
|
|
||||||
use tokio::io::AsyncWriteExt;
|
|
||||||
use torut::onion::TorSecretKeyV3;
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use super::target::BackupTargetId;
|
|
||||||
use super::PackageBackupReport;
|
|
||||||
use crate::auth::check_password_against_db;
|
|
||||||
use crate::backup::{BackupReport, ServerBackupReport};
|
|
||||||
use crate::context::RpcContext;
|
|
||||||
use crate::db::model::BackupProgress;
|
|
||||||
use crate::disk::mount::backup::BackupMountGuard;
|
|
||||||
use crate::disk::mount::filesystem::ReadWrite;
|
|
||||||
use crate::disk::mount::guard::TmpMountGuard;
|
|
||||||
use crate::notifications::NotificationLevel;
|
|
||||||
use crate::s9pk::manifest::PackageId;
|
|
||||||
use crate::status::MainStatus;
|
|
||||||
use crate::util::display_none;
|
|
||||||
use crate::util::serde::IoFormat;
|
|
||||||
use crate::version::VersionT;
|
|
||||||
use crate::{Error, ErrorKind, ResultExt};
|
|
||||||
|
|
||||||
/// OS-level state included in a full backup (`os-backup.cbor`): the server's
/// Tor identity, root CA key/certificate, and the `ui` database subtree.
#[derive(Debug)]
pub struct OsBackup {
    pub tor_key: TorSecretKeyV3,
    pub root_ca_key: PKey<Private>,
    pub root_ca_cert: X509,
    // Snapshot of the `ui` subtree of the database.
    pub ui: Value,
}
|
|
||||||
impl<'de> Deserialize<'de> for OsBackup {
    /// Deserializes from the string intermediate form: the Tor key is an
    /// RFC4648 base32 string, the CA key and cert are PEM strings.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        #[derive(Deserialize)]
        // NOTE(review): `rename` renames the container, not the fields —
        // this was almost certainly meant to be `rename_all = "kebab-case"`.
        // As written, fields serialize as snake_case. The `Serialize` impl
        // below has the matching "bug", so round-trips are self-consistent;
        // fixing it would change the on-disk backup format and break
        // existing backups. Do not change without a migration plan.
        #[serde(rename = "kebab-case")]
        struct OsBackupDe {
            tor_key: String,
            root_ca_key: String,
            root_ca_cert: String,
            ui: Value,
        }
        let int = OsBackupDe::deserialize(deserializer)?;
        let key_vec = base32::decode(base32::Alphabet::RFC4648 { padding: true }, &int.tor_key)
            .ok_or_else(|| {
                serde::de::Error::invalid_value(
                    serde::de::Unexpected::Str(&int.tor_key),
                    &"an RFC4648 encoded string",
                )
            })?;
        // The decoded Tor secret key must be exactly 64 bytes.
        if key_vec.len() != 64 {
            return Err(serde::de::Error::invalid_value(
                serde::de::Unexpected::Str(&int.tor_key),
                &"a 64 byte value encoded as an RFC4648 string",
            ));
        }
        let mut key_slice = [0; 64];
        key_slice.clone_from_slice(&key_vec);
        Ok(OsBackup {
            tor_key: TorSecretKeyV3::from(key_slice),
            root_ca_key: PKey::<Private>::private_key_from_pem(int.root_ca_key.as_bytes())
                .map_err(serde::de::Error::custom)?,
            root_ca_cert: X509::from_pem(int.root_ca_cert.as_bytes())
                .map_err(serde::de::Error::custom)?,
            ui: int.ui,
        })
    }
}
|
|
||||||
impl Serialize for OsBackup {
    /// Serializes to the string intermediate form: Tor key as RFC4648
    /// base32, CA key as PKCS#8 PEM, certificate as PEM.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        #[derive(Serialize)]
        // NOTE(review): `rename` renames the container, not the fields —
        // probably meant `rename_all = "kebab-case"`. Matches the
        // `Deserialize` impl above, so the format is self-consistent;
        // changing it would break compatibility with existing backups.
        #[serde(rename = "kebab-case")]
        struct OsBackupSer<'a> {
            tor_key: String,
            root_ca_key: String,
            root_ca_cert: String,
            ui: &'a Value,
        }
        OsBackupSer {
            tor_key: base32::encode(
                base32::Alphabet::RFC4648 { padding: true },
                &self.tor_key.as_bytes(),
            ),
            root_ca_key: String::from_utf8(
                self.root_ca_key
                    .private_key_to_pem_pkcs8()
                    .map_err(serde::ser::Error::custom)?,
            )
            .map_err(serde::ser::Error::custom)?,
            root_ca_cert: String::from_utf8(
                self.root_ca_cert
                    .to_pem()
                    .map_err(serde::ser::Error::custom)?,
            )
            .map_err(serde::ser::Error::custom)?,
            ui: &self.ui,
        }
        .serialize(serializer)
    }
}
|
|
||||||
|
|
||||||
fn parse_comma_separated(arg: &str, _: &ArgMatches) -> Result<BTreeSet<PackageId>, Error> {
|
|
||||||
arg.split(',')
|
|
||||||
.map(|s| s.trim().parse().map_err(Error::from))
|
|
||||||
.collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// RPC `backup.create`: performs a full backup of the OS and the given
/// packages (all installed packages if none are specified) onto the target.
///
/// Password verification, mounting, and marking packages as backing-up
/// happen synchronously; the actual backup then runs in a detached task
/// that reports its outcome via a notification.
#[command(rename = "create", display(display_none))]
#[instrument(skip(ctx, old_password, password))]
pub async fn backup_all(
    #[context] ctx: RpcContext,
    #[arg(rename = "target-id")] target_id: BackupTargetId,
    // Previous backup password, supplied when re-keying an existing target.
    #[arg(rename = "old-password", long = "old-password")] old_password: Option<String>,
    #[arg(
        rename = "package-ids",
        long = "package-ids",
        parse(parse_comma_separated)
    )]
    package_ids: Option<BTreeSet<PackageId>>,
    #[arg] password: String,
) -> Result<(), Error> {
    let mut db = ctx.db.handle();
    check_password_against_db(&mut ctx.secret_store.acquire().await?, &password).await?;
    let fs = target_id
        .load(&mut ctx.secret_store.acquire().await?)
        .await?;
    // Decrypt/mount the backup with the old password if given, else current.
    let mut backup_guard = BackupMountGuard::mount(
        TmpMountGuard::mount(&fs, ReadWrite).await?,
        old_password.as_ref().unwrap_or(&password),
    )
    .await?;
    let all_packages = crate::db::DatabaseModel::new()
        .package_data()
        .get(&mut db, false)
        .await?
        .0
        .keys()
        .into_iter()
        .cloned()
        .collect();
    // Default to backing up every installed package.
    let package_ids = package_ids.unwrap_or(all_packages);
    // Supplying an old password means re-keying the backup to the new one.
    if old_password.is_some() {
        backup_guard.change_password(&password)?;
    }
    // Fails if a backup is already in progress.
    assure_backing_up(&mut db, &package_ids).await?;
    // Detached task: completion/failure is reported via notifications, not
    // via this RPC's return value.
    tokio::task::spawn(async move {
        let backup_res = perform_backup(&ctx, &mut db, backup_guard, &package_ids).await;
        let backup_progress = crate::db::DatabaseModel::new()
            .server_info()
            .status_info()
            .backup_progress();
        backup_progress
            .clone()
            .lock(&mut db, LockType::Write)
            .await
            .expect("failed to lock server status");
        match backup_res {
            // All packages succeeded.
            Ok(report) if report.iter().all(|(_, rep)| rep.error.is_none()) => ctx
                .notification_manager
                .notify(
                    &mut db,
                    None,
                    NotificationLevel::Success,
                    "Backup Complete".to_owned(),
                    "Your backup has completed".to_owned(),
                    BackupReport {
                        server: ServerBackupReport {
                            attempted: true,
                            error: None,
                        },
                        packages: report,
                    },
                    None,
                )
                .await
                .expect("failed to send notification"),
            // Completed, but at least one package reported an error.
            Ok(report) => ctx
                .notification_manager
                .notify(
                    &mut db,
                    None,
                    NotificationLevel::Warning,
                    "Backup Complete".to_owned(),
                    "Your backup has completed, but some package(s) failed to backup".to_owned(),
                    BackupReport {
                        server: ServerBackupReport {
                            attempted: true,
                            error: None,
                        },
                        packages: report,
                    },
                    None,
                )
                .await
                .expect("failed to send notification"),
            // The backup as a whole failed.
            Err(e) => {
                tracing::error!("Backup Failed: {}", e);
                tracing::debug!("{:?}", e);
                ctx.notification_manager
                    .notify(
                        &mut db,
                        None,
                        NotificationLevel::Error,
                        "Backup Failed".to_owned(),
                        "Your backup failed to complete.".to_owned(),
                        BackupReport {
                            server: ServerBackupReport {
                                attempted: true,
                                error: Some(e.to_string()),
                            },
                            packages: BTreeMap::new(),
                        },
                        None,
                    )
                    .await
                    .expect("failed to send notification");
            }
        }
        // Clear the in-progress marker regardless of outcome.
        backup_progress
            .delete(&mut db)
            .await
            .expect("failed to change server status");
    });
    Ok(())
}
|
|
||||||
|
|
||||||
#[instrument(skip(db, packages))]
|
|
||||||
async fn assure_backing_up(
|
|
||||||
db: &mut PatchDbHandle,
|
|
||||||
packages: impl IntoIterator<Item = &PackageId>,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
let mut tx = db.begin().await?;
|
|
||||||
let mut backing_up = crate::db::DatabaseModel::new()
|
|
||||||
.server_info()
|
|
||||||
.status_info()
|
|
||||||
.backup_progress()
|
|
||||||
.get_mut(&mut tx)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
if backing_up
|
|
||||||
.iter()
|
|
||||||
.flat_map(|x| x.values())
|
|
||||||
.fold(false, |acc, x| {
|
|
||||||
if !x.complete {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
acc
|
|
||||||
})
|
|
||||||
{
|
|
||||||
return Err(Error::new(
|
|
||||||
eyre!("Server is already backing up!"),
|
|
||||||
crate::ErrorKind::InvalidRequest,
|
|
||||||
));
|
|
||||||
}
|
|
||||||
*backing_up = Some(
|
|
||||||
packages
|
|
||||||
.into_iter()
|
|
||||||
.map(|x| (x.clone(), BackupProgress { complete: false }))
|
|
||||||
.collect(),
|
|
||||||
);
|
|
||||||
backing_up.save(&mut tx).await?;
|
|
||||||
tx.commit().await?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Backs up each requested installed package, then the OS-level state
/// (`os-backup.cbor`), updates backup metadata, and unmounts the target.
///
/// For each package: marks it `BackingUp`, synchronizes its manager, runs
/// its backup procedure into the mounted target, restores its prior status,
/// and records per-package progress/outcome. Returns the per-package report.
#[instrument(skip(ctx, db, backup_guard))]
async fn perform_backup<Db: DbHandle>(
    ctx: &RpcContext,
    mut db: Db,
    mut backup_guard: BackupMountGuard<TmpMountGuard>,
    package_ids: &BTreeSet<PackageId>,
) -> Result<BTreeMap<PackageId, PackageBackupReport>, Error> {
    let mut backup_report = BTreeMap::new();

    for package_id in crate::db::DatabaseModel::new()
        .package_data()
        .keys(&mut db, false)
        .await?
        .into_iter()
        .filter(|id| package_ids.contains(id))
    {
        let mut tx = db.begin().await?; // for lock scope
        // Skip packages that are no longer installed.
        let installed_model = if let Some(installed_model) = crate::db::DatabaseModel::new()
            .package_data()
            .idx_model(&package_id)
            .and_then(|m| m.installed())
            .check(&mut tx)
            .await?
        {
            installed_model
        } else {
            continue;
        };
        let main_status_model = installed_model.clone().status().main();

        main_status_model.lock(&mut tx, LockType::Write).await?;
        // Capture the current run state so it can be restored afterwards.
        let (started, health) = match main_status_model.get(&mut tx, true).await?.into_owned() {
            MainStatus::Starting { .. } => (Some(Utc::now()), Default::default()),
            MainStatus::Running { started, health } => (Some(started), health.clone()),
            MainStatus::Stopped | MainStatus::Stopping | MainStatus::Restarting => {
                (None, Default::default())
            }
            MainStatus::BackingUp { .. } => {
                backup_report.insert(
                    package_id,
                    PackageBackupReport {
                        error: Some(
                            "Can't do backup because service is in a backing up state".to_owned(),
                        ),
                    },
                );
                continue;
            }
        };
        main_status_model
            .put(
                &mut tx,
                &MainStatus::BackingUp {
                    started,
                    health: health.clone(),
                },
            )
            .await?;
        tx.save().await?; // drop locks

        let manifest = installed_model
            .clone()
            .manifest()
            .get(&mut db, false)
            .await?;

        // Let the package's manager settle into the BackingUp state before
        // running the backup procedure.
        ctx.managers
            .get(&(manifest.id.clone(), manifest.version.clone()))
            .await
            .ok_or_else(|| {
                Error::new(eyre!("Manager not found"), crate::ErrorKind::InvalidRequest)
            })?
            .synchronize()
            .await;

        let mut tx = db.begin().await?;

        installed_model.lock(&mut tx, LockType::Write).await?;

        let guard = backup_guard.mount_package_backup(&package_id).await?;
        let res = manifest
            .backup
            .create(
                ctx,
                &mut tx,
                &package_id,
                &manifest.title,
                &manifest.version,
                &manifest.interfaces,
                &manifest.volumes,
            )
            .await;
        guard.unmount().await?;
        // Record the outcome even on failure; the error goes in the report.
        backup_report.insert(
            package_id.clone(),
            PackageBackupReport {
                error: res.as_ref().err().map(|e| e.to_string()),
            },
        );

        if let Ok(pkg_meta) = res {
            installed_model
                .last_backup()
                .put(&mut tx, &Some(pkg_meta.timestamp))
                .await?;
            backup_guard
                .metadata
                .package_backups
                .insert(package_id.clone(), pkg_meta);
        }

        // Restore the package's pre-backup run state.
        main_status_model
            .put(
                &mut tx,
                &match started {
                    Some(started) => MainStatus::Running { started, health },
                    None => MainStatus::Stopped,
                },
            )
            .await?;

        // Mark this package complete in the server-wide progress map.
        let mut backup_progress = crate::db::DatabaseModel::new()
            .server_info()
            .status_info()
            .backup_progress()
            .get_mut(&mut tx)
            .await?;
        if backup_progress.is_none() {
            *backup_progress = Some(Default::default());
        }
        if let Some(mut backup_progress) = backup_progress
            .as_mut()
            .and_then(|bp| bp.get_mut(&package_id))
        {
            (*backup_progress).complete = true;
        }
        backup_progress.save(&mut tx).await?;
        tx.save().await?;
    }

    // Lock the whole database while writing the OS-level backup.
    crate::db::DatabaseModel::new()
        .lock(&mut db, LockType::Write)
        .await?;

    let (root_ca_key, root_ca_cert) = ctx.net_controller.ssl.export_root_ca().await?;
    let mut os_backup_file = AtomicFile::new(
        backup_guard.as_ref().join("os-backup.cbor"),
        None::<PathBuf>,
    )
    .await
    .with_kind(ErrorKind::Filesystem)?;
    os_backup_file
        .write_all(
            &IoFormat::Cbor.to_vec(&OsBackup {
                tor_key: ctx.net_controller.tor.embassyd_tor_key().await,
                root_ca_key,
                root_ca_cert,
                ui: crate::db::DatabaseModel::new()
                    .ui()
                    .get(&mut db, true)
                    .await?
                    .into_owned(),
            })?,
        )
        .await?;
    os_backup_file
        .save()
        .await
        .with_kind(ErrorKind::Filesystem)?;

    let timestamp = Some(Utc::now());

    // Stamp both the encrypted and unencrypted metadata with this version.
    backup_guard.unencrypted_metadata.version = crate::version::Current::new().semver().into();
    backup_guard.unencrypted_metadata.full = true;
    backup_guard.metadata.version = crate::version::Current::new().semver().into();
    backup_guard.metadata.timestamp = timestamp;

    backup_guard.save_and_unmount().await?;

    crate::db::DatabaseModel::new()
        .server_info()
        .last_backup()
        .put(&mut db, &timestamp)
        .await?;
    Ok(backup_report)
}
|
|
||||||
@@ -1,278 +0,0 @@
|
|||||||
use std::collections::{BTreeMap, BTreeSet};
|
|
||||||
use std::path::{Path, PathBuf};
|
|
||||||
|
|
||||||
use chrono::{DateTime, Utc};
|
|
||||||
use color_eyre::eyre::eyre;
|
|
||||||
use helpers::AtomicFile;
|
|
||||||
use patch_db::{DbHandle, HasModel, LockType};
|
|
||||||
use reqwest::Url;
|
|
||||||
use rpc_toolkit::command;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use sqlx::{Executor, Postgres};
|
|
||||||
use tokio::fs::File;
|
|
||||||
use tokio::io::AsyncWriteExt;
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use self::target::PackageBackupInfo;
|
|
||||||
use crate::context::RpcContext;
|
|
||||||
use crate::dependencies::reconfigure_dependents_with_live_pointers;
|
|
||||||
use crate::id::ImageId;
|
|
||||||
use crate::install::PKG_ARCHIVE_DIR;
|
|
||||||
use crate::net::interface::{InterfaceId, Interfaces};
|
|
||||||
use crate::procedure::{NoOutput, PackageProcedure, ProcedureName};
|
|
||||||
use crate::s9pk::manifest::PackageId;
|
|
||||||
use crate::util::serde::IoFormat;
|
|
||||||
use crate::util::Version;
|
|
||||||
use crate::version::{Current, VersionT};
|
|
||||||
use crate::volume::{backup_dir, Volume, VolumeId, Volumes, BACKUP_DIR};
|
|
||||||
use crate::{Error, ErrorKind, ResultExt};
|
|
||||||
|
|
||||||
pub mod backup_bulk;
|
|
||||||
pub mod restore;
|
|
||||||
pub mod target;
|
|
||||||
|
|
||||||
/// Overall result of a backup run, attached to the completion notification.
#[derive(Debug, Deserialize, Serialize)]
pub struct BackupReport {
    // Outcome of the OS-level (server) backup.
    server: ServerBackupReport,
    // Per-package outcomes, keyed by package id.
    packages: BTreeMap<PackageId, PackageBackupReport>,
}
|
|
||||||
|
|
||||||
/// Outcome of backing up OS-level state.
#[derive(Debug, Deserialize, Serialize)]
pub struct ServerBackupReport {
    // Whether the server backup was attempted at all.
    attempted: bool,
    // Error message, if the server backup failed.
    error: Option<String>,
}
|
|
||||||
|
|
||||||
/// Outcome of backing up a single package.
#[derive(Debug, Deserialize, Serialize)]
pub struct PackageBackupReport {
    // Error message if this package's backup failed; `None` on success.
    error: Option<String>,
}
|
|
||||||
|
|
||||||
/// Parent command for the `backup` subcommands (`create`, `target`).
/// Does nothing by itself.
#[command(subcommands(backup_bulk::backup_all, target::target))]
pub fn backup() -> Result<(), Error> {
    Ok(())
}
|
|
||||||
|
|
||||||
/// Parent command for package-scoped backup subcommands (restore).
/// Does nothing by itself.
#[command(rename = "backup", subcommands(restore::restore_packages_rpc))]
pub fn package_backup() -> Result<(), Error> {
    Ok(())
}
|
|
||||||
|
|
||||||
/// Per-package metadata written alongside a package backup
/// (`metadata.cbor`).
#[derive(Deserialize, Serialize)]
struct BackupMetadata {
    pub timestamp: DateTime<Utc>,
    // Tor secret key per interface, RFC4648 base32 encoded.
    pub tor_keys: BTreeMap<InterfaceId, String>,
    pub marketplace_url: Option<Url>,
}
|
|
||||||
|
|
||||||
/// The backup and restore procedures declared in a package's manifest.
#[derive(Clone, Debug, Deserialize, Serialize, HasModel)]
pub struct BackupActions {
    // Procedure that produces the package's backup.
    pub create: PackageProcedure,
    // Procedure that restores the package from a backup.
    pub restore: PackageProcedure,
}
|
|
||||||
impl BackupActions {
|
|
||||||
/// Validates both backup procedures against the package's declared volumes
/// and image ids (at s9pk validation time).
pub fn validate(
    &self,
    eos_version: &Version,
    volumes: &Volumes,
    image_ids: &BTreeSet<ImageId>,
) -> Result<(), Error> {
    self.create
        .validate(eos_version, volumes, image_ids, false)
        .with_ctx(|_| (crate::ErrorKind::ValidateS9pk, "Backup Create"))?;
    self.restore
        .validate(eos_version, volumes, image_ids, false)
        .with_ctx(|_| (crate::ErrorKind::ValidateS9pk, "Backup Restore"))?;
    Ok(())
}
|
|
||||||
|
|
||||||
/// Runs the package's backup procedure into the mounted backup directory,
/// then writes the package's Tor keys, marketplace URL, and a copy of its
/// s9pk alongside, and returns the info recorded in the backup's metadata.
#[instrument(skip(ctx, db))]
pub async fn create<Db: DbHandle>(
    &self,
    ctx: &RpcContext,
    db: &mut Db,
    pkg_id: &PackageId,
    pkg_title: &str,
    pkg_version: &Version,
    interfaces: &Interfaces,
    volumes: &Volumes,
) -> Result<PackageBackupInfo, Error> {
    // The procedure sees read-only volumes plus a writable backup volume.
    let mut volumes = volumes.to_readonly();
    volumes.insert(VolumeId::Backup, Volume::Backup { readonly: false });
    let backup_dir = backup_dir(pkg_id);
    if tokio::fs::metadata(&backup_dir).await.is_err() {
        tokio::fs::create_dir_all(&backup_dir).await?
    }
    self.create
        .execute::<(), NoOutput>(
            ctx,
            pkg_id,
            pkg_version,
            ProcedureName::CreateBackup,
            &volumes,
            None,
            false,
            None,
        )
        .await?
        .map_err(|e| eyre!("{}", e.1))
        .with_kind(crate::ErrorKind::Backup)?;
    // Capture the package's Tor keys so its addresses survive a restore.
    let tor_keys = interfaces
        .tor_keys(&mut ctx.secret_store.acquire().await?, pkg_id)
        .await?
        .into_iter()
        .map(|(id, key)| {
            (
                id,
                base32::encode(base32::Alphabet::RFC4648 { padding: true }, &key.as_bytes()),
            )
        })
        .collect();
    let marketplace_url = crate::db::DatabaseModel::new()
        .package_data()
        .idx_model(pkg_id)
        .expect(db)
        .await?
        .installed()
        .expect(db)
        .await?
        .marketplace_url()
        .get(db, true)
        .await?
        .into_owned();
    // Copy the installed s9pk into the backup via an atomic temp file.
    let tmp_path = Path::new(BACKUP_DIR)
        .join(pkg_id)
        .join(format!("{}.s9pk", pkg_id));
    let s9pk_path = ctx
        .datadir
        .join(PKG_ARCHIVE_DIR)
        .join(pkg_id)
        .join(pkg_version.as_str())
        .join(format!("{}.s9pk", pkg_id));
    let mut infile = File::open(&s9pk_path).await?;
    let mut outfile = AtomicFile::new(&tmp_path, None::<PathBuf>)
        .await
        .with_kind(ErrorKind::Filesystem)?;
    tokio::io::copy(&mut infile, &mut *outfile)
        .await
        .with_ctx(|_| {
            (
                crate::ErrorKind::Filesystem,
                format!("cp {} -> {}", s9pk_path.display(), tmp_path.display()),
            )
        })?;
    outfile.save().await.with_kind(ErrorKind::Filesystem)?;
    let timestamp = Utc::now();
    // Write the per-package metadata (timestamp, tor keys, marketplace url).
    let metadata_path = Path::new(BACKUP_DIR).join(pkg_id).join("metadata.cbor");
    let mut outfile = AtomicFile::new(&metadata_path, None::<PathBuf>)
        .await
        .with_kind(ErrorKind::Filesystem)?;
    outfile
        .write_all(&IoFormat::Cbor.to_vec(&BackupMetadata {
            timestamp,
            tor_keys,
            marketplace_url,
        })?)
        .await?;
    outfile.save().await.with_kind(ErrorKind::Filesystem)?;
    Ok(PackageBackupInfo {
        os_version: Current::new().semver().into(),
        title: pkg_title.to_owned(),
        version: pkg_version.clone(),
        timestamp,
    })
}
|
|
||||||
|
|
||||||
/// Restores this package's state from a mounted backup.
///
/// Steps, in order:
/// 1. Runs the package's `restore-backup` procedure with the backup
///    directory attached as a read-only `Backup` volume.
/// 2. Reads `metadata.cbor` from the package's backup dir and re-imports
///    the Tor keys it records into the `tor` secrets table (upserting on
///    `(package, interface)`).
/// 3. Restores interface addresses and the marketplace URL in the db.
/// 4. Reconfigures dependent packages holding live pointers into this one.
#[instrument(skip(ctx, db, secrets))]
pub async fn restore<Ex, Db: DbHandle>(
    &self,
    ctx: &RpcContext,
    db: &mut Db,
    secrets: &mut Ex,
    pkg_id: &PackageId,
    pkg_version: &Version,
    interfaces: &Interfaces,
    volumes: &Volumes,
) -> Result<(), Error>
where
    for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,
{
    // Expose the backup dir to the restore procedure as a read-only volume.
    let mut volumes = volumes.clone();
    volumes.insert(VolumeId::Backup, Volume::Backup { readonly: true });
    self.restore
        .execute::<(), NoOutput>(
            ctx,
            pkg_id,
            pkg_version,
            ProcedureName::RestoreBackup,
            &volumes,
            None,
            false,
            None,
        )
        .await?
        // e.1 is the procedure's error message; e.0 is its exit code.
        .map_err(|e| eyre!("{}", e.1))
        .with_kind(crate::ErrorKind::Restore)?;
    let metadata_path = Path::new(BACKUP_DIR).join(pkg_id).join("metadata.cbor");
    let metadata: BackupMetadata = IoFormat::Cbor.from_slice(
        &tokio::fs::read(&metadata_path).await.with_ctx(|_| {
            (
                crate::ErrorKind::Filesystem,
                metadata_path.display().to_string(),
            )
        })?,
    )?;
    // Tor hidden-service keys are stored base32 (RFC 4648, padded) in the
    // backup metadata; decode and upsert each one into the secret store.
    for (iface, key) in metadata.tor_keys {
        let key_vec = base32::decode(base32::Alphabet::RFC4648 { padding: true }, &key)
            .ok_or_else(|| {
                Error::new(
                    eyre!("invalid base32 string"),
                    crate::ErrorKind::Deserialization,
                )
            })?;
        sqlx::query!(
            "INSERT INTO tor (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO UPDATE SET key = $3",
            **pkg_id,
            *iface,
            key_vec,
        )
        .execute(&mut *secrets)
        .await?;
    }
    // Take a write lock over package_data before mutating the entry.
    crate::db::DatabaseModel::new()
        .package_data()
        .lock(db, LockType::Write)
        .await?;
    let pde = crate::db::DatabaseModel::new()
        .package_data()
        .idx_model(pkg_id)
        .expect(db)
        .await?
        .installed()
        .expect(db)
        .await?;
    pde.clone()
        .interface_addresses()
        .put(db, &interfaces.install(&mut *secrets, pkg_id).await?)
        .await?;
    pde.marketplace_url()
        .put(db, &metadata.marketplace_url)
        .await?;

    // Re-read the (now updated) installed entry so dependents are
    // reconfigured against the restored state.
    let entry = crate::db::DatabaseModel::new()
        .package_data()
        .idx_model(pkg_id)
        .expect(db)
        .await?
        .installed()
        .expect(db)
        .await?
        .get(db, true)
        .await?;

    let receipts = crate::config::ConfigReceipts::new(db).await?;
    reconfigure_dependents_with_live_pointers(ctx, db, &receipts, &entry).await?;

    Ok(())
}
|
|
||||||
}
|
|
||||||
@@ -1,451 +0,0 @@
|
|||||||
use std::collections::BTreeMap;
|
|
||||||
use std::path::Path;
|
|
||||||
use std::sync::atomic::Ordering;
|
|
||||||
use std::sync::Arc;
|
|
||||||
use std::time::Duration;
|
|
||||||
|
|
||||||
use clap::ArgMatches;
|
|
||||||
use color_eyre::eyre::eyre;
|
|
||||||
use futures::future::BoxFuture;
|
|
||||||
use futures::FutureExt;
|
|
||||||
use openssl::x509::X509;
|
|
||||||
use patch_db::{DbHandle, PatchDbHandle};
|
|
||||||
use rpc_toolkit::command;
|
|
||||||
use tokio::fs::File;
|
|
||||||
use tokio::task::JoinHandle;
|
|
||||||
use torut::onion::OnionAddressV3;
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use super::target::BackupTargetId;
|
|
||||||
use crate::backup::backup_bulk::OsBackup;
|
|
||||||
use crate::context::{RpcContext, SetupContext};
|
|
||||||
use crate::db::model::{PackageDataEntry, StaticFiles};
|
|
||||||
use crate::disk::mount::backup::{BackupMountGuard, PackageBackupMountGuard};
|
|
||||||
use crate::disk::mount::filesystem::ReadOnly;
|
|
||||||
use crate::disk::mount::guard::TmpMountGuard;
|
|
||||||
use crate::install::progress::InstallProgress;
|
|
||||||
use crate::install::{download_install_s9pk, PKG_PUBLIC_DIR};
|
|
||||||
use crate::net::ssl::SslManager;
|
|
||||||
use crate::notifications::NotificationLevel;
|
|
||||||
use crate::s9pk::manifest::{Manifest, PackageId};
|
|
||||||
use crate::s9pk::reader::S9pkReader;
|
|
||||||
use crate::setup::RecoveryStatus;
|
|
||||||
use crate::util::display_none;
|
|
||||||
use crate::util::io::dir_size;
|
|
||||||
use crate::util::serde::IoFormat;
|
|
||||||
use crate::volume::{backup_dir, BACKUP_DIR, PKG_VOLUME_DIR};
|
|
||||||
use crate::{Error, ResultExt};
|
|
||||||
|
|
||||||
fn parse_comma_separated(arg: &str, _: &ArgMatches) -> Result<Vec<PackageId>, Error> {
|
|
||||||
arg.split(',')
|
|
||||||
.map(|s| s.trim().parse().map_err(Error::from))
|
|
||||||
.collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// RPC entry point: restores the given packages from an encrypted backup
/// target.
///
/// Mounts the target read-only, kicks off one restore task per package, and
/// returns immediately; the tasks are awaited in a detached background task
/// that reports per-package failures as error notifications and unmounts the
/// backup drive when all tasks have finished.
#[command(rename = "restore", display(display_none))]
#[instrument(skip(ctx, password))]
pub async fn restore_packages_rpc(
    #[context] ctx: RpcContext,
    #[arg(parse(parse_comma_separated))] ids: Vec<PackageId>,
    #[arg(rename = "target-id")] target_id: BackupTargetId,
    #[arg] password: String,
) -> Result<(), Error> {
    let mut db = ctx.db.handle();
    let fs = target_id
        .load(&mut ctx.secret_store.acquire().await?)
        .await?;
    // Two-level mount: the raw filesystem read-only, then the encrypted
    // backup layer unlocked with the user's password.
    let backup_guard =
        BackupMountGuard::mount(TmpMountGuard::mount(&fs, ReadOnly).await?, &password).await?;

    let (backup_guard, tasks, _) = restore_packages(&ctx, &mut db, backup_guard, ids).await?;

    // Fire-and-forget: restoration continues after this RPC returns.
    tokio::spawn(async move {
        let res = futures::future::join_all(tasks).await;
        for res in res {
            match res.with_kind(crate::ErrorKind::Unknown) {
                // Task ran and the package restored successfully.
                Ok((Ok(_), _)) => (),
                // Task ran but the package restore failed: notify + log.
                Ok((Err(err), package_id)) => {
                    if let Err(err) = ctx
                        .notification_manager
                        .notify(
                            &mut db,
                            Some(package_id.clone()),
                            NotificationLevel::Error,
                            "Restoration Failure".to_string(),
                            format!("Error restoring package {}: {}", package_id, err),
                            (),
                            None,
                        )
                        .await
                    {
                        tracing::error!("Failed to notify: {}", err);
                        tracing::debug!("{:?}", err);
                    };
                    tracing::error!("Error restoring package {}: {}", package_id, err);
                    tracing::debug!("{:?}", err);
                }
                // The spawned task itself failed (e.g. panicked/joined with error).
                Err(e) => {
                    if let Err(err) = ctx
                        .notification_manager
                        .notify(
                            &mut db,
                            None,
                            NotificationLevel::Error,
                            "Restoration Failure".to_string(),
                            format!("Error during restoration: {}", e),
                            (),
                            None,
                        )
                        .await
                    {
                        tracing::error!("Failed to notify: {}", err);
                        tracing::debug!("{:?}", err);
                    }
                    tracing::error!("Error restoring packages: {}", e);
                    tracing::debug!("{:?}", e);
                }
            }
        }
        // Best-effort unmount once every restore task has completed.
        if let Err(e) = backup_guard.unmount().await {
            tracing::error!("Error unmounting backup drive: {}", e);
            tracing::debug!("{:?}", e);
        }
    });

    Ok(())
}
|
|
||||||
|
|
||||||
async fn approximate_progress(
|
|
||||||
rpc_ctx: &RpcContext,
|
|
||||||
progress: &mut ProgressInfo,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
for (id, size) in &mut progress.target_volume_size {
|
|
||||||
let dir = rpc_ctx.datadir.join(PKG_VOLUME_DIR).join(id).join("data");
|
|
||||||
if tokio::fs::metadata(&dir).await.is_err() {
|
|
||||||
*size = 0;
|
|
||||||
} else {
|
|
||||||
*size = dir_size(&dir).await?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Polls restore progress once per second forever, publishing the flattened
/// status into `ctx.recovery_status`. Intended to be raced against the
/// restore tasks via `select!`; it never returns on its own.
async fn approximate_progress_loop(
    ctx: &SetupContext,
    rpc_ctx: &RpcContext,
    mut starting_info: ProgressInfo,
) {
    loop {
        if let Err(e) = approximate_progress(rpc_ctx, &mut starting_info).await {
            // Log and keep looping: a transient fs error shouldn't kill the
            // progress reporter.
            tracing::error!("Failed to approximate restore progress: {}", e);
            tracing::debug!("{:?}", e);
        } else {
            *ctx.recovery_status.write().await = Some(Ok(starting_info.flatten()));
        }
        tokio::time::sleep(Duration::from_secs(1)).await;
    }
}
|
|
||||||
|
|
||||||
/// Aggregate bookkeeping for a multi-package restore, keyed by package id.
#[derive(Debug, Default)]
struct ProgressInfo {
    // Shared install-progress handles for each package being (re)installed.
    package_installs: BTreeMap<PackageId, Arc<InstallProgress>>,
    // Size of each package's backup source directory (fixed at start).
    src_volume_size: BTreeMap<PackageId, u64>,
    // Current size of each package's restored data volume (polled).
    target_volume_size: BTreeMap<PackageId, u64>,
}
|
|
||||||
impl ProgressInfo {
|
|
||||||
fn flatten(&self) -> RecoveryStatus {
|
|
||||||
let mut total_bytes = 0;
|
|
||||||
let mut bytes_transferred = 0;
|
|
||||||
|
|
||||||
for progress in self.package_installs.values() {
|
|
||||||
total_bytes += ((progress.size.unwrap_or(0) as f64) * 2.2) as u64;
|
|
||||||
bytes_transferred += progress.downloaded.load(Ordering::SeqCst);
|
|
||||||
bytes_transferred += ((progress.validated.load(Ordering::SeqCst) as f64) * 0.2) as u64;
|
|
||||||
bytes_transferred += progress.unpacked.load(Ordering::SeqCst);
|
|
||||||
}
|
|
||||||
|
|
||||||
for size in self.src_volume_size.values() {
|
|
||||||
total_bytes += *size;
|
|
||||||
}
|
|
||||||
|
|
||||||
for size in self.target_volume_size.values() {
|
|
||||||
bytes_transferred += *size;
|
|
||||||
}
|
|
||||||
|
|
||||||
if bytes_transferred > total_bytes {
|
|
||||||
bytes_transferred = total_bytes;
|
|
||||||
}
|
|
||||||
|
|
||||||
RecoveryStatus {
|
|
||||||
total_bytes,
|
|
||||||
bytes_transferred,
|
|
||||||
complete: false,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Performs a full OS recovery from a backup during initial setup.
///
/// Synchronously: mounts the (optionally password-protected) backup, reads
/// `os-backup.cbor`, re-hashes the new account password with argon2, restores
/// the account row and Tor key into the secret store, and re-imports the root
/// CA. Returns the onion address, the root CA cert, and a boxed future that
/// — when driven — restores every package found in the backup while racing a
/// progress-reporting loop, then unmounts and shuts the temporary RPC
/// context down.
#[instrument(skip(ctx))]
pub async fn recover_full_embassy(
    ctx: SetupContext,
    disk_guid: Arc<String>,
    embassy_password: String,
    recovery_source: TmpMountGuard,
    recovery_password: Option<String>,
) -> Result<(OnionAddressV3, X509, BoxFuture<'static, Result<(), Error>>), Error> {
    let backup_guard = BackupMountGuard::mount(
        recovery_source,
        // Absent password is treated as the empty string.
        recovery_password.as_deref().unwrap_or_default(),
    )
    .await?;

    let os_backup_path = backup_guard.as_ref().join("os-backup.cbor");
    let os_backup: OsBackup =
        IoFormat::Cbor.from_slice(&tokio::fs::read(&os_backup_path).await.with_ctx(|_| {
            (
                crate::ErrorKind::Filesystem,
                os_backup_path.display().to_string(),
            )
        })?)?;

    // Hash the freshly chosen password with a random 16-byte salt.
    let password = argon2::hash_encoded(
        embassy_password.as_bytes(),
        &rand::random::<[u8; 16]>()[..],
        &argon2::Config::default(),
    )
    .with_kind(crate::ErrorKind::PasswordHashGeneration)?;
    let key_vec = os_backup.tor_key.as_bytes().to_vec();
    let secret_store = ctx.secret_store().await?;
    // Single-row account table (id = 0): upsert password + Tor key.
    sqlx::query!(
        "INSERT INTO account (id, password, tor_key) VALUES ($1, $2, $3) ON CONFLICT (id) DO UPDATE SET password = $2, tor_key = $3",
        0,
        password,
        key_vec,
    )
    .execute(&mut secret_store.acquire().await?)
    .await?;

    SslManager::import_root_ca(
        secret_store.clone(),
        os_backup.root_ca_key,
        os_backup.root_ca_cert.clone(),
    )
    .await?;
    secret_store.close().await;

    Ok((
        os_backup.tor_key.public().get_onion_address(),
        os_backup.root_ca_cert,
        // Deferred phase: package restoration, driven by the caller.
        async move {
            let rpc_ctx = RpcContext::init(ctx.config_path.as_ref(), disk_guid).await?;
            let mut db = rpc_ctx.db.handle();

            // Restore every package recorded in the backup metadata.
            let ids = backup_guard
                .metadata
                .package_backups
                .keys()
                .cloned()
                .collect();
            let (backup_guard, tasks, progress_info) = restore_packages(
                &rpc_ctx,
                &mut db,
                backup_guard,
                ids,
            )
            .await?;

            // Race the restore tasks against the progress loop; the loop
            // never terminates, so the select always exits via the tasks.
            tokio::select! {
                res = futures::future::join_all(tasks) => {
                    for res in res {
                        match res.with_kind(crate::ErrorKind::Unknown) {
                            Ok((Ok(_), _)) => (),
                            // Package restore failed: notify + log, continue.
                            Ok((Err(err), package_id)) => {
                                if let Err(err) = rpc_ctx.notification_manager.notify(
                                    &mut db,
                                    Some(package_id.clone()),
                                    NotificationLevel::Error,
                                    "Restoration Failure".to_string(), format!("Error restoring package {}: {}", package_id,err), (), None).await{
                                    tracing::error!("Failed to notify: {}", err);
                                    tracing::debug!("{:?}", err);
                                };
                                tracing::error!("Error restoring package {}: {}", package_id, err);
                                tracing::debug!("{:?}", err);
                            },
                            // The task itself failed to join.
                            Err(e) => {
                                if let Err(err) = rpc_ctx.notification_manager.notify(
                                    &mut db,
                                    None,
                                    NotificationLevel::Error,
                                    "Restoration Failure".to_string(), format!("Error during restoration: {}", e), (), None).await {

                                    tracing::error!("Failed to notify: {}", err);
                                    tracing::debug!("{:?}", err);
                                }
                                tracing::error!("Error restoring packages: {}", e);
                                tracing::debug!("{:?}", e);
                            },

                        }
                    }
                },
                _ = approximate_progress_loop(&ctx, &rpc_ctx, progress_info) => unreachable!(concat!(module_path!(), "::approximate_progress_loop should not terminate")),
            }

            backup_guard.unmount().await?;
            rpc_ctx.shutdown().await
        }.boxed()
    ))
}
|
|
||||||
|
|
||||||
/// Prepares and launches one restore task per package.
///
/// First validates every package and marks it `Restoring` in the db
/// (`assure_restoring`), then spawns a tokio task per package that drives
/// the actual restore. Returns the (still mounted) backup guard, the task
/// join handles tagged with their package id, and the progress bookkeeping
/// seeded with initial sizes.
async fn restore_packages(
    ctx: &RpcContext,
    db: &mut PatchDbHandle,
    backup_guard: BackupMountGuard<TmpMountGuard>,
    ids: Vec<PackageId>,
) -> Result<
    (
        BackupMountGuard<TmpMountGuard>,
        Vec<JoinHandle<(Result<(), Error>, PackageId)>>,
        ProgressInfo,
    ),
    Error,
> {
    let guards = assure_restoring(ctx, db, ids, &backup_guard).await?;

    let mut progress_info = ProgressInfo::default();

    let mut tasks = Vec::with_capacity(guards.len());
    for (manifest, guard) in guards {
        let id = manifest.id.clone();
        let (progress, task) = restore_package(ctx.clone(), manifest, guard).await?;
        progress_info.package_installs.insert(id.clone(), progress);
        // Source size is fixed up front; target starts at 0 and is polled
        // by the progress loop.
        progress_info
            .src_volume_size
            .insert(id.clone(), dir_size(backup_dir(&id)).await?);
        progress_info.target_volume_size.insert(id.clone(), 0);
        let package_id = id.clone();
        tasks.push(tokio::spawn(
            async move {
                if let Err(e) = task.await {
                    tracing::error!("Error restoring package {}: {}", id, e);
                    tracing::debug!("{:?}", e);
                    Err(e)
                } else {
                    Ok(())
                }
            }
            // Tag the outcome with its package id so callers can attribute
            // failures without tracking task order.
            .map(|x| (x, package_id)),
        ));
    }

    Ok((backup_guard, tasks, progress_info))
}
|
|
||||||
|
|
||||||
#[instrument(skip(ctx, db, backup_guard))]
|
|
||||||
async fn assure_restoring(
|
|
||||||
ctx: &RpcContext,
|
|
||||||
db: &mut PatchDbHandle,
|
|
||||||
ids: Vec<PackageId>,
|
|
||||||
backup_guard: &BackupMountGuard<TmpMountGuard>,
|
|
||||||
) -> Result<Vec<(Manifest, PackageBackupMountGuard)>, Error> {
|
|
||||||
let mut tx = db.begin().await?;
|
|
||||||
|
|
||||||
let mut guards = Vec::with_capacity(ids.len());
|
|
||||||
|
|
||||||
for id in ids {
|
|
||||||
let mut model = crate::db::DatabaseModel::new()
|
|
||||||
.package_data()
|
|
||||||
.idx_model(&id)
|
|
||||||
.get_mut(&mut tx)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
if !model.is_none() {
|
|
||||||
return Err(Error::new(
|
|
||||||
eyre!("Can't restore over existing package: {}", id),
|
|
||||||
crate::ErrorKind::InvalidRequest,
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let guard = backup_guard.mount_package_backup(&id).await?;
|
|
||||||
let s9pk_path = Path::new(BACKUP_DIR).join(&id).join(format!("{}.s9pk", id));
|
|
||||||
let mut rdr = S9pkReader::open(&s9pk_path, false).await?;
|
|
||||||
|
|
||||||
let manifest = rdr.manifest().await?;
|
|
||||||
let version = manifest.version.clone();
|
|
||||||
let progress = InstallProgress::new(Some(tokio::fs::metadata(&s9pk_path).await?.len()));
|
|
||||||
|
|
||||||
let public_dir_path = ctx
|
|
||||||
.datadir
|
|
||||||
.join(PKG_PUBLIC_DIR)
|
|
||||||
.join(&id)
|
|
||||||
.join(version.as_str());
|
|
||||||
tokio::fs::create_dir_all(&public_dir_path).await?;
|
|
||||||
|
|
||||||
let license_path = public_dir_path.join("LICENSE.md");
|
|
||||||
let mut dst = File::create(&license_path).await?;
|
|
||||||
tokio::io::copy(&mut rdr.license().await?, &mut dst).await?;
|
|
||||||
dst.sync_all().await?;
|
|
||||||
|
|
||||||
let instructions_path = public_dir_path.join("INSTRUCTIONS.md");
|
|
||||||
let mut dst = File::create(&instructions_path).await?;
|
|
||||||
tokio::io::copy(&mut rdr.instructions().await?, &mut dst).await?;
|
|
||||||
dst.sync_all().await?;
|
|
||||||
|
|
||||||
let icon_path = Path::new("icon").with_extension(&manifest.assets.icon_type());
|
|
||||||
let icon_path = public_dir_path.join(&icon_path);
|
|
||||||
let mut dst = File::create(&icon_path).await?;
|
|
||||||
tokio::io::copy(&mut rdr.icon().await?, &mut dst).await?;
|
|
||||||
dst.sync_all().await?;
|
|
||||||
|
|
||||||
*model = Some(PackageDataEntry::Restoring {
|
|
||||||
install_progress: progress.clone(),
|
|
||||||
static_files: StaticFiles::local(&id, &version, manifest.assets.icon_type()),
|
|
||||||
manifest: manifest.clone(),
|
|
||||||
});
|
|
||||||
model.save(&mut tx).await?;
|
|
||||||
|
|
||||||
guards.push((manifest, guard));
|
|
||||||
}
|
|
||||||
|
|
||||||
tx.commit().await?;
|
|
||||||
Ok(guards)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(skip(ctx, guard))]
|
|
||||||
async fn restore_package<'a>(
|
|
||||||
ctx: RpcContext,
|
|
||||||
manifest: Manifest,
|
|
||||||
guard: PackageBackupMountGuard,
|
|
||||||
) -> Result<(Arc<InstallProgress>, BoxFuture<'static, Result<(), Error>>), Error> {
|
|
||||||
let s9pk_path = Path::new(BACKUP_DIR)
|
|
||||||
.join(&manifest.id)
|
|
||||||
.join(format!("{}.s9pk", manifest.id));
|
|
||||||
let len = tokio::fs::metadata(&s9pk_path)
|
|
||||||
.await
|
|
||||||
.with_ctx(|_| {
|
|
||||||
(
|
|
||||||
crate::ErrorKind::Filesystem,
|
|
||||||
s9pk_path.display().to_string(),
|
|
||||||
)
|
|
||||||
})?
|
|
||||||
.len();
|
|
||||||
let file = File::open(&s9pk_path).await.with_ctx(|_| {
|
|
||||||
(
|
|
||||||
crate::ErrorKind::Filesystem,
|
|
||||||
s9pk_path.display().to_string(),
|
|
||||||
)
|
|
||||||
})?;
|
|
||||||
|
|
||||||
let progress = InstallProgress::new(Some(len));
|
|
||||||
|
|
||||||
Ok((
|
|
||||||
progress.clone(),
|
|
||||||
async move {
|
|
||||||
download_install_s9pk(&ctx, &manifest, None, progress, file).await?;
|
|
||||||
|
|
||||||
guard.unmount().await?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
.boxed(),
|
|
||||||
))
|
|
||||||
}
|
|
||||||
@@ -1,211 +0,0 @@
|
|||||||
use std::path::{Path, PathBuf};
|
|
||||||
|
|
||||||
use color_eyre::eyre::eyre;
|
|
||||||
use futures::TryStreamExt;
|
|
||||||
use rpc_toolkit::command;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use sqlx::{Executor, Postgres};
|
|
||||||
|
|
||||||
use super::{BackupTarget, BackupTargetId};
|
|
||||||
use crate::context::RpcContext;
|
|
||||||
use crate::disk::mount::filesystem::cifs::Cifs;
|
|
||||||
use crate::disk::mount::filesystem::ReadOnly;
|
|
||||||
use crate::disk::mount::guard::TmpMountGuard;
|
|
||||||
use crate::disk::util::{recovery_info, EmbassyOsRecoveryInfo};
|
|
||||||
use crate::util::display_none;
|
|
||||||
use crate::util::serde::KeyVal;
|
|
||||||
use crate::Error;
|
|
||||||
|
|
||||||
/// User-visible description of a CIFS/SMB backup share.
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct CifsBackupTarget {
    hostname: String,
    path: PathBuf,
    username: String,
    // Whether the share could be mounted when last probed.
    mountable: bool,
    // Recovery info found on the share, if it holds an EmbassyOS backup.
    embassy_os: Option<EmbassyOsRecoveryInfo>,
}
|
|
||||||
|
|
||||||
/// Parent command for CIFS backup-target management; only dispatches to the
/// `add`, `update`, and `remove` subcommands.
#[command(subcommands(add, update, remove))]
pub fn cifs() -> Result<(), Error> {
    Ok(())
}
|
|
||||||
|
|
||||||
/// Registers a new CIFS share as a backup target.
///
/// Probes the share first (mount read-only, look for recovery info, unmount)
/// so a bad hostname/credentials fails before anything is persisted, then
/// inserts the share into `cifs_shares` and returns the new target keyed by
/// its database id.
#[command(display(display_none))]
pub async fn add(
    #[context] ctx: RpcContext,
    #[arg] hostname: String,
    #[arg] path: PathBuf,
    #[arg] username: String,
    #[arg] password: Option<String>,
) -> Result<KeyVal<BackupTargetId, BackupTarget>, Error> {
    let cifs = Cifs {
        hostname,
        path,
        username,
        password,
    };
    // Verify the share is reachable and check for an existing OS backup.
    let guard = TmpMountGuard::mount(&cifs, ReadOnly).await?;
    let embassy_os = recovery_info(&guard).await?;
    guard.unmount().await?;
    // Normalize to an absolute share path before persisting.
    let path_string = Path::new("/").join(&cifs.path).display().to_string();
    let id: i32 = sqlx::query!(
        "INSERT INTO cifs_shares (hostname, path, username, password) VALUES ($1, $2, $3, $4) RETURNING id",
        cifs.hostname,
        path_string,
        cifs.username,
        cifs.password,
    )
    .fetch_one(&ctx.secret_store)
    .await?.id;
    Ok(KeyVal {
        key: BackupTargetId::Cifs { id },
        value: BackupTarget::Cifs(CifsBackupTarget {
            hostname: cifs.hostname,
            path: cifs.path,
            username: cifs.username,
            // We just mounted it successfully above.
            mountable: true,
            embassy_os,
        }),
    })
}
|
|
||||||
|
|
||||||
/// Updates an existing CIFS backup target in place.
///
/// Accepts only `BackupTargetId::Cifs` ids; probes the new settings by
/// mounting the share before persisting, and errors with `NotFound` if the
/// row no longer exists (zero rows affected).
#[command(display(display_none))]
pub async fn update(
    #[context] ctx: RpcContext,
    #[arg] id: BackupTargetId,
    #[arg] hostname: String,
    #[arg] path: PathBuf,
    #[arg] username: String,
    #[arg] password: Option<String>,
) -> Result<KeyVal<BackupTargetId, BackupTarget>, Error> {
    // Only CIFS targets are stored in the database and thus updatable.
    let id = if let BackupTargetId::Cifs { id } = id {
        id
    } else {
        return Err(Error::new(
            eyre!("Backup Target ID {} Not Found", id),
            crate::ErrorKind::NotFound,
        ));
    };
    let cifs = Cifs {
        hostname,
        path,
        username,
        password,
    };
    // Verify the new settings actually mount before writing them.
    let guard = TmpMountGuard::mount(&cifs, ReadOnly).await?;
    let embassy_os = recovery_info(&guard).await?;
    guard.unmount().await?;
    // Normalize to an absolute share path, matching `add`.
    let path_string = Path::new("/").join(&cifs.path).display().to_string();
    if sqlx::query!(
        "UPDATE cifs_shares SET hostname = $1, path = $2, username = $3, password = $4 WHERE id = $5",
        cifs.hostname,
        path_string,
        cifs.username,
        cifs.password,
        id,
    )
    .execute(&ctx.secret_store)
    .await?
    .rows_affected()
        == 0
    {
        // No row matched: the target was deleted out from under us.
        return Err(Error::new(
            eyre!("Backup Target ID {} Not Found", BackupTargetId::Cifs { id }),
            crate::ErrorKind::NotFound,
        ));
    };
    Ok(KeyVal {
        key: BackupTargetId::Cifs { id },
        value: BackupTarget::Cifs(CifsBackupTarget {
            hostname: cifs.hostname,
            path: cifs.path,
            username: cifs.username,
            // We just mounted it successfully above.
            mountable: true,
            embassy_os,
        }),
    })
}
|
|
||||||
|
|
||||||
/// Deletes a CIFS backup target; errors with `NotFound` for non-CIFS ids or
/// when no row was deleted.
#[command(display(display_none))]
pub async fn remove(#[context] ctx: RpcContext, #[arg] id: BackupTargetId) -> Result<(), Error> {
    // Only CIFS targets live in the database; disk targets can't be removed.
    let id = if let BackupTargetId::Cifs { id } = id {
        id
    } else {
        return Err(Error::new(
            eyre!("Backup Target ID {} Not Found", id),
            crate::ErrorKind::NotFound,
        ));
    };
    if sqlx::query!("DELETE FROM cifs_shares WHERE id = $1", id)
        .execute(&ctx.secret_store)
        .await?
        .rows_affected()
        == 0
    {
        return Err(Error::new(
            eyre!("Backup Target ID {} Not Found", BackupTargetId::Cifs { id }),
            crate::ErrorKind::NotFound,
        ));
    };
    Ok(())
}
|
|
||||||
|
|
||||||
/// Loads the stored mount settings for the CIFS share with the given
/// database id. Fails if the row does not exist (`fetch_one`).
pub async fn load<Ex>(secrets: &mut Ex, id: i32) -> Result<Cifs, Error>
where
    for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,
{
    let record = sqlx::query!(
        "SELECT hostname, path, username, password FROM cifs_shares WHERE id = $1",
        id
    )
    .fetch_one(secrets)
    .await?;

    Ok(Cifs {
        hostname: record.hostname,
        path: PathBuf::from(record.path),
        username: record.username,
        password: record.password,
    })
}
|
|
||||||
|
|
||||||
/// Lists all stored CIFS shares, probing each one by attempting a read-only
/// mount to determine `mountable` and any recovery info present.
///
/// A share that fails to mount is still returned (with `mountable: false`)
/// rather than erroring the whole listing.
pub async fn list<Ex>(secrets: &mut Ex) -> Result<Vec<(i32, CifsBackupTarget)>, Error>
where
    for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,
{
    let mut records =
        sqlx::query!("SELECT id, hostname, path, username, password FROM cifs_shares")
            .fetch_many(secrets);

    let mut cifs = Vec::new();
    while let Some(query_result) = records.try_next().await? {
        // fetch_many yields Either<Done, Row>; only rows are of interest.
        if let Some(record) = query_result.right() {
            let mount_info = Cifs {
                hostname: record.hostname,
                path: PathBuf::from(record.path),
                username: record.username,
                password: record.password,
            };
            // Probe: mount, read recovery info, unmount. Errors are captured
            // in the Result rather than propagated.
            let embassy_os = async {
                let guard = TmpMountGuard::mount(&mount_info, ReadOnly).await?;
                let embassy_os = recovery_info(&guard).await?;
                guard.unmount().await?;
                Ok::<_, Error>(embassy_os)
            }
            .await;
            cifs.push((
                record.id,
                CifsBackupTarget {
                    hostname: mount_info.hostname,
                    path: mount_info.path,
                    username: mount_info.username,
                    mountable: embassy_os.is_ok(),
                    // Flatten Result<Option<_>> into Option<_>.
                    embassy_os: embassy_os.ok().and_then(|a| a),
                },
            ));
        }
    }

    Ok(cifs)
}
|
|
||||||
@@ -1,247 +0,0 @@
|
|||||||
use std::collections::BTreeMap;
|
|
||||||
use std::path::{Path, PathBuf};
|
|
||||||
|
|
||||||
use async_trait::async_trait;
|
|
||||||
use chrono::{DateTime, Utc};
|
|
||||||
use clap::ArgMatches;
|
|
||||||
use color_eyre::eyre::eyre;
|
|
||||||
use digest::generic_array::GenericArray;
|
|
||||||
use digest::OutputSizeUser;
|
|
||||||
use rpc_toolkit::command;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use sha2::Sha256;
|
|
||||||
use sqlx::{Executor, Postgres};
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use self::cifs::CifsBackupTarget;
|
|
||||||
use crate::context::RpcContext;
|
|
||||||
use crate::disk::mount::backup::BackupMountGuard;
|
|
||||||
use crate::disk::mount::filesystem::block_dev::BlockDev;
|
|
||||||
use crate::disk::mount::filesystem::cifs::Cifs;
|
|
||||||
use crate::disk::mount::filesystem::{FileSystem, MountType, ReadOnly};
|
|
||||||
use crate::disk::mount::guard::TmpMountGuard;
|
|
||||||
use crate::disk::util::PartitionInfo;
|
|
||||||
use crate::s9pk::manifest::PackageId;
|
|
||||||
use crate::util::serde::{deserialize_from_str, display_serializable, serialize_display};
|
|
||||||
use crate::util::Version;
|
|
||||||
use crate::Error;
|
|
||||||
|
|
||||||
pub mod cifs;
|
|
||||||
|
|
||||||
/// A place backups can be written to: a local disk partition or a CIFS share.
#[derive(Debug, Deserialize, Serialize)]
#[serde(tag = "type")]
#[serde(rename_all = "kebab-case")]
pub enum BackupTarget {
    #[serde(rename_all = "kebab-case")]
    Disk {
        vendor: Option<String>,
        model: Option<String>,
        // Partition fields are inlined into the variant's JSON object.
        #[serde(flatten)]
        partition_info: PartitionInfo,
    },
    Cifs(CifsBackupTarget),
}
|
|
||||||
|
|
||||||
/// Stable identifier for a backup target: a disk is keyed by its device
/// logicalname, a CIFS share by its database row id. Serialized via its
/// `Display`/`FromStr` string form ("disk-…" / "cifs-…").
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum BackupTargetId {
    Disk { logicalname: PathBuf },
    Cifs { id: i32 },
}
|
|
||||||
impl BackupTargetId {
    /// Resolves this id into a mountable filesystem description. Disk ids
    /// resolve locally; CIFS ids are looked up in the secret store.
    pub async fn load<Ex>(self, secrets: &mut Ex) -> Result<BackupTargetFS, Error>
    where
        for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,
    {
        Ok(match self {
            BackupTargetId::Disk { logicalname } => {
                BackupTargetFS::Disk(BlockDev::new(logicalname))
            }
            BackupTargetId::Cifs { id } => BackupTargetFS::Cifs(cifs::load(secrets, id).await?),
        })
    }
}
|
|
||||||
impl std::fmt::Display for BackupTargetId {
|
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
|
||||||
match self {
|
|
||||||
BackupTargetId::Disk { logicalname } => write!(f, "disk-{}", logicalname.display()),
|
|
||||||
BackupTargetId::Cifs { id } => write!(f, "cifs-{}", id),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl std::str::FromStr for BackupTargetId {
|
|
||||||
type Err = Error;
|
|
||||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
|
||||||
match s.split_once("-") {
|
|
||||||
Some(("disk", logicalname)) => Ok(BackupTargetId::Disk {
|
|
||||||
logicalname: Path::new(logicalname).to_owned(),
|
|
||||||
}),
|
|
||||||
Some(("cifs", id)) => Ok(BackupTargetId::Cifs { id: id.parse()? }),
|
|
||||||
_ => Err(Error::new(
|
|
||||||
eyre!("Invalid Backup Target ID"),
|
|
||||||
crate::ErrorKind::InvalidBackupTargetId,
|
|
||||||
)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Deserialize from the "disk-…"/"cifs-…" string form via FromStr.
impl<'de> Deserialize<'de> for BackupTargetId {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        deserialize_from_str(deserializer)
    }
}
|
|
||||||
// Serialize as the "disk-…"/"cifs-…" string form via Display.
impl Serialize for BackupTargetId {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        serialize_display(self, serializer)
    }
}
|
|
||||||
|
|
||||||
/// Concrete, mountable form of a backup target (resolved from a
/// `BackupTargetId` by `BackupTargetId::load`).
#[derive(Debug, Deserialize, Serialize)]
#[serde(tag = "type")]
#[serde(rename_all = "kebab-case")]
pub enum BackupTargetFS {
    Disk(BlockDev<PathBuf>),
    Cifs(Cifs),
}
|
|
||||||
// Delegating FileSystem impl: each method simply forwards to the wrapped
// BlockDev or Cifs variant.
#[async_trait]
impl FileSystem for BackupTargetFS {
    async fn mount<P: AsRef<Path> + Send + Sync>(
        &self,
        mountpoint: P,
        mount_type: MountType,
    ) -> Result<(), Error> {
        match self {
            BackupTargetFS::Disk(a) => a.mount(mountpoint, mount_type).await,
            BackupTargetFS::Cifs(a) => a.mount(mountpoint, mount_type).await,
        }
    }
    async fn source_hash(
        &self,
    ) -> Result<GenericArray<u8, <Sha256 as OutputSizeUser>::OutputSize>, Error> {
        match self {
            BackupTargetFS::Disk(a) => a.source_hash().await,
            BackupTargetFS::Cifs(a) => a.source_hash().await,
        }
    }
}
|
|
||||||
|
|
||||||
/// Parent command for backup-target management; only dispatches to the
/// `cifs`, `list`, and `info` subcommands.
#[command(subcommands(cifs::cifs, list, info))]
pub fn target() -> Result<(), Error> {
    Ok(())
}
|
|
||||||
|
|
||||||
/// Lists every available backup target: one entry per partition of each
/// attached disk, plus every stored CIFS share. Disk enumeration and CIFS
/// listing run concurrently via `try_join!`.
#[command(display(display_serializable))]
pub async fn list(
    #[context] ctx: RpcContext,
) -> Result<BTreeMap<BackupTargetId, BackupTarget>, Error> {
    let mut sql_handle = ctx.secret_store.acquire().await?;
    let (disks_res, cifs) =
        tokio::try_join!(crate::disk::util::list(), cifs::list(&mut sql_handle),)?;
    Ok(disks_res
        .into_iter()
        // Each disk contributes one target per partition; `take` moves the
        // partitions out so `part` can be owned by the entry.
        .flat_map(|mut disk| {
            std::mem::take(&mut disk.partitions)
                .into_iter()
                .map(|part| {
                    (
                        BackupTargetId::Disk {
                            logicalname: part.logicalname.clone(),
                        },
                        BackupTarget::Disk {
                            vendor: disk.vendor.clone(),
                            model: disk.model.clone(),
                            partition_info: part,
                        },
                    )
                })
                .collect::<Vec<_>>()
        })
        .chain(
            cifs.into_iter()
                .map(|(id, cifs)| (BackupTargetId::Cifs { id }, BackupTarget::Cifs(cifs))),
        )
        .collect())
}
|
|
||||||
|
|
||||||
/// Top-level metadata stored with a backup: the OS version that wrote it,
/// when it was last written (`None` if never completed), and per-package
/// backup details keyed by package id.
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct BackupInfo {
    /// EmbassyOS version that produced this backup.
    pub version: Version,
    /// Completion time of the most recent backup, if any.
    pub timestamp: Option<DateTime<Utc>>,
    /// Per-package backup metadata, sorted by package id.
    pub package_backups: BTreeMap<PackageId, PackageBackupInfo>,
}
|
|
||||||
|
|
||||||
/// Metadata for a single package's backup within a `BackupInfo`.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct PackageBackupInfo {
    /// Human-readable package title.
    pub title: String,
    /// Version of the package that was backed up.
    pub version: Version,
    /// EmbassyOS version at the time this package was backed up.
    pub os_version: Version,
    /// When this package's backup was taken.
    pub timestamp: DateTime<Utc>,
}
|
|
||||||
|
|
||||||
/// CLI display hook for `backup target info`: renders `BackupInfo` as a
/// table on the terminal, or falls back to serialized output when the user
/// passed `--format`.
fn display_backup_info(info: BackupInfo, matches: &ArgMatches) {
    use prettytable::*;

    // Honor an explicit output format (e.g. JSON/YAML) instead of the table.
    if matches.is_present("format") {
        return display_serializable(info, matches);
    }

    let mut table = Table::new();
    table.add_row(row![bc =>
        "ID",
        "VERSION",
        "OS VERSION",
        "TIMESTAMP",
    ]);
    // Header row for the OS itself. For this row the package version and the
    // OS version are the same value, so `info.version` fills both columns.
    table.add_row(row![
        "EMBASSY OS",
        info.version.as_str(),
        info.version.as_str(),
        &if let Some(ts) = &info.timestamp {
            ts.to_string()
        } else {
            "N/A".to_owned()
        },
    ]);
    // One row per backed-up package (shadowing `info` with the per-package
    // entry inside the loop).
    for (id, info) in info.package_backups {
        let row = row![
            id.as_str(),
            info.version.as_str(),
            info.os_version.as_str(),
            &info.timestamp.to_string(),
        ];
        table.add_row(row);
    }
    // NOTE(review): unwrap here will panic if writing to the terminal fails
    // (e.g. closed pipe) — presumably acceptable for a CLI; confirm.
    table.print_tty(false).unwrap();
}
|
|
||||||
|
|
||||||
/// RPC command: mounts the given backup target read-only, decrypts it with
/// `password`, returns its `BackupInfo` metadata, and unmounts again.
///
/// `password` is excluded from tracing spans via `skip` so it is never logged.
#[command(display(display_backup_info))]
#[instrument(skip(ctx, password))]
pub async fn info(
    #[context] ctx: RpcContext,
    #[arg(rename = "target-id")] target_id: BackupTargetId,
    #[arg] password: String,
) -> Result<BackupInfo, Error> {
    // Resolve the target id to a concrete filesystem, mount it read-only in a
    // temp location, then layer the password-protected backup mount on top.
    let guard = BackupMountGuard::mount(
        TmpMountGuard::mount(
            &target_id
                .load(&mut ctx.secret_store.acquire().await?)
                .await?,
            ReadOnly,
        )
        .await?,
        &password,
    )
    .await?;

    // Copy the metadata out before unmounting so the guard can be released.
    let res = guard.metadata.clone();

    guard.unmount().await?;

    Ok(res)
}
|
|
||||||
@@ -1,163 +0,0 @@
|
|||||||
use avahi_sys::{
|
|
||||||
self, avahi_client_errno, avahi_entry_group_add_service, avahi_entry_group_commit,
|
|
||||||
avahi_strerror, AvahiClient,
|
|
||||||
};
|
|
||||||
|
|
||||||
fn log_str_error(action: &str, e: i32) {
|
|
||||||
unsafe {
|
|
||||||
let e_str = avahi_strerror(e);
|
|
||||||
eprintln!(
|
|
||||||
"Could not {}: {:?}",
|
|
||||||
action,
|
|
||||||
std::ffi::CStr::from_ptr(e_str)
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Publishes this host's `_http._tcp` service plus one CNAME record per
/// command-line alias via Avahi, then parks the thread forever so the
/// registrations stay alive. Panics (after logging) on any Avahi failure.
fn main() {
    // Every CLI argument is an alias to publish as `<alias>.local`.
    let aliases: Vec<_> = std::env::args().skip(1).collect();
    unsafe {
        let simple_poll = avahi_sys::avahi_simple_poll_new();
        let poll = avahi_sys::avahi_simple_poll_get(simple_poll);
        // Pinned out-parameter for avahi_client_new's error code.
        let mut box_err = Box::pin(0 as i32);
        let err_c: *mut i32 = box_err.as_mut().get_mut();
        let avahi_client = avahi_sys::avahi_client_new(
            poll,
            avahi_sys::AvahiClientFlags::AVAHI_CLIENT_NO_FAIL,
            Some(client_callback),
            std::ptr::null_mut(),
            err_c,
        );
        if avahi_client == std::ptr::null_mut::<AvahiClient>() {
            log_str_error("create Avahi client", *box_err);
            panic!("Failed to create Avahi Client");
        }
        let group = avahi_sys::avahi_entry_group_new(
            avahi_client,
            Some(entry_group_callback),
            std::ptr::null_mut(),
        );
        if group == std::ptr::null_mut() {
            log_str_error("create Avahi entry group", avahi_client_errno(avahi_client));
            panic!("Failed to create Avahi Entry Group");
        }
        // Build the DNS wire-format name for the FQDN: a leading length byte
        // (patched below), the label bytes, and the trailing NUL.
        let mut hostname_buf = vec![0];
        let hostname_raw = avahi_sys::avahi_client_get_host_name_fqdn(avahi_client);
        hostname_buf.extend_from_slice(std::ffi::CStr::from_ptr(hostname_raw).to_bytes_with_nul());
        let buflen = hostname_buf.len();
        debug_assert!(hostname_buf.ends_with(b".local\0"));
        debug_assert!(!hostname_buf[..(buflen - 7)].contains(&b'.'));
        // assume fixed length prefix on hostname due to local address
        hostname_buf[0] = (buflen - 8) as u8; // set the prefix length to len - 8 (leading byte, .local, nul) for the main address
        hostname_buf[buflen - 7] = 5; // set the prefix length to 5 for "local"
        let mut res;
        let http_tcp_cstr =
            std::ffi::CString::new("_http._tcp").expect("Could not cast _http._tcp to c string");
        res = avahi_entry_group_add_service(
            group,
            avahi_sys::AVAHI_IF_UNSPEC,
            avahi_sys::AVAHI_PROTO_UNSPEC,
            avahi_sys::AvahiPublishFlags_AVAHI_PUBLISH_USE_MULTICAST,
            hostname_raw,
            http_tcp_cstr.as_ptr(),
            std::ptr::null(),
            std::ptr::null(),
            443,
            // IMPORTANT: avahi_entry_group_add_service is a C variadic
            // function — the trailing varargs are TXT records, terminated by
            // a NULL sentinel. The Rust signature does not express this, so
            // omitting this final null pointer causes the C side to walk off
            // the argument list: undefined behavior (segfaults or worse).
            std::ptr::null::<libc::c_char>(),
        );
        if res < avahi_sys::AVAHI_OK {
            log_str_error("add service to Avahi entry group", res);
            panic!("Failed to load Avahi services");
        }
        eprintln!("Published {:?}", std::ffi::CStr::from_ptr(hostname_raw));
        // Publish each alias as a CNAME pointing at the real hostname.
        for alias in aliases {
            let lan_address = alias + ".local";
            let lan_address_ptr = std::ffi::CString::new(lan_address)
                .expect("Could not cast lan address to c string");
            res = avahi_sys::avahi_entry_group_add_record(
                group,
                avahi_sys::AVAHI_IF_UNSPEC,
                avahi_sys::AVAHI_PROTO_UNSPEC,
                avahi_sys::AvahiPublishFlags_AVAHI_PUBLISH_USE_MULTICAST
                    | avahi_sys::AvahiPublishFlags_AVAHI_PUBLISH_ALLOW_MULTIPLE,
                lan_address_ptr.as_ptr(),
                avahi_sys::AVAHI_DNS_CLASS_IN as u16,
                avahi_sys::AVAHI_DNS_TYPE_CNAME as u16,
                avahi_sys::AVAHI_DEFAULT_TTL,
                hostname_buf.as_ptr().cast(),
                hostname_buf.len(),
            );
            if res < avahi_sys::AVAHI_OK {
                log_str_error("add CNAME record to Avahi entry group", res);
                panic!("Failed to load Avahi services");
            }
            eprintln!("Published {:?}", lan_address_ptr);
        }
        // Commit the whole entry group so the records actually go live.
        let commit_err = avahi_entry_group_commit(group);
        if commit_err < avahi_sys::AVAHI_OK {
            log_str_error("reset Avahi entry group", commit_err);
            panic!("Failed to load Avahi services: reset");
        }
    }
    // Keep the process (and thus the registrations) alive indefinitely.
    std::thread::park()
}
|
|
||||||
|
|
||||||
unsafe extern "C" fn entry_group_callback(
|
|
||||||
_group: *mut avahi_sys::AvahiEntryGroup,
|
|
||||||
state: avahi_sys::AvahiEntryGroupState,
|
|
||||||
_userdata: *mut core::ffi::c_void,
|
|
||||||
) {
|
|
||||||
match state {
|
|
||||||
avahi_sys::AvahiEntryGroupState_AVAHI_ENTRY_GROUP_FAILURE => {
|
|
||||||
eprintln!("AvahiCallback: EntryGroupState = AVAHI_ENTRY_GROUP_FAILURE");
|
|
||||||
}
|
|
||||||
avahi_sys::AvahiEntryGroupState_AVAHI_ENTRY_GROUP_COLLISION => {
|
|
||||||
eprintln!("AvahiCallback: EntryGroupState = AVAHI_ENTRY_GROUP_COLLISION");
|
|
||||||
}
|
|
||||||
avahi_sys::AvahiEntryGroupState_AVAHI_ENTRY_GROUP_UNCOMMITED => {
|
|
||||||
eprintln!("AvahiCallback: EntryGroupState = AVAHI_ENTRY_GROUP_UNCOMMITED");
|
|
||||||
}
|
|
||||||
avahi_sys::AvahiEntryGroupState_AVAHI_ENTRY_GROUP_ESTABLISHED => {
|
|
||||||
eprintln!("AvahiCallback: EntryGroupState = AVAHI_ENTRY_GROUP_ESTABLISHED");
|
|
||||||
}
|
|
||||||
avahi_sys::AvahiEntryGroupState_AVAHI_ENTRY_GROUP_REGISTERING => {
|
|
||||||
eprintln!("AvahiCallback: EntryGroupState = AVAHI_ENTRY_GROUP_REGISTERING");
|
|
||||||
}
|
|
||||||
other => {
|
|
||||||
eprintln!("AvahiCallback: EntryGroupState = {}", other);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
unsafe extern "C" fn client_callback(
|
|
||||||
_group: *mut avahi_sys::AvahiClient,
|
|
||||||
state: avahi_sys::AvahiClientState,
|
|
||||||
_userdata: *mut core::ffi::c_void,
|
|
||||||
) {
|
|
||||||
match state {
|
|
||||||
avahi_sys::AvahiClientState_AVAHI_CLIENT_FAILURE => {
|
|
||||||
eprintln!("AvahiCallback: ClientState = AVAHI_CLIENT_FAILURE");
|
|
||||||
}
|
|
||||||
avahi_sys::AvahiClientState_AVAHI_CLIENT_S_RUNNING => {
|
|
||||||
eprintln!("AvahiCallback: ClientState = AVAHI_CLIENT_S_RUNNING");
|
|
||||||
}
|
|
||||||
avahi_sys::AvahiClientState_AVAHI_CLIENT_CONNECTING => {
|
|
||||||
eprintln!("AvahiCallback: ClientState = AVAHI_CLIENT_CONNECTING");
|
|
||||||
}
|
|
||||||
avahi_sys::AvahiClientState_AVAHI_CLIENT_S_COLLISION => {
|
|
||||||
eprintln!("AvahiCallback: ClientState = AVAHI_CLIENT_S_COLLISION");
|
|
||||||
}
|
|
||||||
avahi_sys::AvahiClientState_AVAHI_CLIENT_S_REGISTERING => {
|
|
||||||
eprintln!("AvahiCallback: ClientState = AVAHI_CLIENT_S_REGISTERING");
|
|
||||||
}
|
|
||||||
other => {
|
|
||||||
eprintln!("AvahiCallback: ClientState = {}", other);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,61 +0,0 @@
|
|||||||
use clap::Arg;
|
|
||||||
use embassy::context::CliContext;
|
|
||||||
use embassy::util::logger::EmbassyLogger;
|
|
||||||
use embassy::version::{Current, VersionT};
|
|
||||||
use embassy::Error;
|
|
||||||
use rpc_toolkit::run_cli;
|
|
||||||
use rpc_toolkit::yajrc::RpcError;
|
|
||||||
use serde_json::Value;
|
|
||||||
|
|
||||||
lazy_static::lazy_static! {
    /// Semver of the current binary, cached as an owned `String` so clap can
    /// borrow it as the long-lived `version` string.
    static ref VERSION_STRING: String = Current::new().semver().to_string();
}
|
|
||||||
|
|
||||||
/// Entry point body for the Embassy CLI: builds the clap app, initializes
/// logging and the CLI context, dispatches `embassy::main_api` commands, and
/// pretty-prints RPC errors before exiting with the error code.
fn inner_main() -> Result<(), Error> {
    run_cli!({
        command: embassy::main_api,
        app: app => app
            .name("Embassy CLI")
            .version(&**VERSION_STRING)
            .arg(
                clap::Arg::with_name("config")
                    .short('c')
                    .long("config")
                    .takes_value(true),
            )
            // NOTE(review): `-h` here shadows clap's auto-generated help
            // short flag — presumably intentional; confirm `--help` still
            // behaves as expected.
            .arg(Arg::with_name("host").long("host").short('h').takes_value(true))
            .arg(Arg::with_name("proxy").long("proxy").short('p').takes_value(true)),
        context: matches => {
            EmbassyLogger::init();
            CliContext::init(matches)?
        },
        // On RPC error: print `message: details` when structured data is
        // present, surface any "debug" payload at debug level, then exit
        // with the JSON-RPC error code.
        exit: |e: RpcError| {
            match e.data {
                Some(Value::String(s)) => eprintln!("{}: {}", e.message, s),
                Some(Value::Object(o)) => if let Some(Value::String(s)) = o.get("details") {
                    eprintln!("{}: {}", e.message, s);
                    if let Some(Value::String(s)) = o.get("debug") {
                        tracing::debug!("{}", s)
                    }
                }
                Some(a) => eprintln!("{}: {}", e.message, a),
                None => eprintln!("{}", e.message),
            }

            std::process::exit(e.code);
        }
    });
    Ok(())
}
|
|
||||||
|
|
||||||
fn main() {
|
|
||||||
match inner_main() {
|
|
||||||
Ok(_) => (),
|
|
||||||
Err(e) => {
|
|
||||||
eprintln!("{}", e.source);
|
|
||||||
tracing::debug!("{:?}", e.source);
|
|
||||||
drop(e.source);
|
|
||||||
std::process::exit(e.kind as i32)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,239 +0,0 @@
|
|||||||
use std::path::Path;
|
|
||||||
use std::sync::Arc;
|
|
||||||
use std::time::Duration;
|
|
||||||
|
|
||||||
use embassy::context::rpc::RpcContextConfig;
|
|
||||||
use embassy::context::{DiagnosticContext, SetupContext};
|
|
||||||
use embassy::disk::fsck::RepairStrategy;
|
|
||||||
use embassy::disk::main::DEFAULT_PASSWORD;
|
|
||||||
use embassy::disk::REPAIR_DISK_PATH;
|
|
||||||
use embassy::init::STANDBY_MODE_PATH;
|
|
||||||
use embassy::middleware::cors::cors;
|
|
||||||
use embassy::middleware::diagnostic::diagnostic;
|
|
||||||
#[cfg(feature = "avahi")]
|
|
||||||
use embassy::net::mdns::MdnsController;
|
|
||||||
use embassy::shutdown::Shutdown;
|
|
||||||
use embassy::sound::CHIME;
|
|
||||||
use embassy::util::logger::EmbassyLogger;
|
|
||||||
use embassy::util::Invoke;
|
|
||||||
use embassy::{Error, ErrorKind, ResultExt};
|
|
||||||
use http::StatusCode;
|
|
||||||
use rpc_toolkit::rpc_server;
|
|
||||||
use tokio::process::Command;
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
/// rpc_toolkit status mapper: every RPC response is served as HTTP 200;
/// the JSON-RPC envelope carries the actual error code, not the HTTP status.
fn status_fn(_: i32) -> StatusCode {
    StatusCode::OK
}
|
|
||||||
|
|
||||||
/// First-boot vs. normal-boot dispatch.
///
/// If `/embassy-os/disk.guid` is absent the device has never been set up:
/// serve the setup wizard (nginx config swap + setup RPC server) until the
/// setup context signals shutdown. Otherwise import and repair the data
/// disk identified by the GUID, then run normal initialization.
#[instrument]
async fn setup_or_init(cfg_path: Option<&str>) -> Result<(), Error> {
    if tokio::fs::metadata("/embassy-os/disk.guid").await.is_err() {
        // --- Setup path: no disk GUID recorded yet. ---
        #[cfg(feature = "avahi")]
        let _mdns = MdnsController::init();
        // Point nginx at the setup wizard UI.
        tokio::fs::write(
            "/etc/nginx/sites-available/default",
            include_str!("../nginx/setup-wizard.conf"),
        )
        .await
        .with_ctx(|_| {
            (
                embassy::ErrorKind::Filesystem,
                "/etc/nginx/sites-available/default",
            )
        })?;
        Command::new("systemctl")
            .arg("reload")
            .arg("nginx")
            .invoke(embassy::ErrorKind::Nginx)
            .await?;
        let ctx = SetupContext::init(cfg_path).await?;
        tokio::time::sleep(Duration::from_secs(1)).await; // let the record state that I hate this
        CHIME.play().await?;
        // Serve the setup API until the setup context broadcasts shutdown.
        rpc_server!({
            command: embassy::setup_api,
            context: ctx.clone(),
            status: status_fn,
            middleware: [
                cors,
            ]
        })
        .with_graceful_shutdown({
            let mut shutdown = ctx.shutdown.subscribe();
            async move {
                shutdown.recv().await.expect("context dropped");
            }
        })
        .await
        .with_kind(embassy::ErrorKind::Network)?;
    } else {
        // --- Normal path: import the existing data disk. ---
        let cfg = RpcContextConfig::load(cfg_path).await?;
        let guid_string = tokio::fs::read_to_string("/embassy-os/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
            .await?;
        let guid = guid_string.trim();
        // Use the aggressive fsck strategy only when a prior run left the
        // repair marker file behind.
        let requires_reboot = embassy::disk::main::import(
            guid,
            cfg.datadir(),
            if tokio::fs::metadata(REPAIR_DISK_PATH).await.is_ok() {
                RepairStrategy::Aggressive
            } else {
                RepairStrategy::Preen
            },
            DEFAULT_PASSWORD,
        )
        .await?;
        // The repair marker is one-shot: clear it after a successful import.
        if tokio::fs::metadata(REPAIR_DISK_PATH).await.is_ok() {
            tokio::fs::remove_file(REPAIR_DISK_PATH)
                .await
                .with_ctx(|_| (embassy::ErrorKind::Filesystem, REPAIR_DISK_PATH))?;
        }
        // Some repairs only take effect after a reboot: export the disk
        // cleanly and reboot the machine.
        if requires_reboot.0 {
            embassy::disk::main::export(guid, cfg.datadir()).await?;
            Command::new("reboot")
                .invoke(embassy::ErrorKind::Unknown)
                .await?;
        }
        tracing::info!("Loaded Disk");
        embassy::init::init(&cfg).await?;
    }

    Ok(())
}
|
|
||||||
|
|
||||||
async fn run_script_if_exists<P: AsRef<Path>>(path: P) {
|
|
||||||
let script = path.as_ref();
|
|
||||||
if script.exists() {
|
|
||||||
match Command::new("/bin/bash").arg(script).spawn() {
|
|
||||||
Ok(mut c) => {
|
|
||||||
if let Err(e) = c.wait().await {
|
|
||||||
tracing::error!("Error Running {}: {}", script.display(), e);
|
|
||||||
tracing::debug!("{:?}", e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Err(e) => {
|
|
||||||
tracing::error!("Error Running {}: {}", script.display(), e);
|
|
||||||
tracing::debug!("{:?}", e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Boot orchestrator for the init binary.
///
/// Handles standby mode (sync, play shutdown sound, park forever), runs the
/// pre-init hook, performs setup-or-init, and — if that fails — falls back
/// to serving the diagnostic UI/API until a shutdown is requested. Returns
/// the `Shutdown` action to perform after the runtime is torn down, if any.
#[instrument]
async fn inner_main(cfg_path: Option<&str>) -> Result<Option<Shutdown>, Error> {
    // Standby: consume the marker, flush disks, and idle forever.
    if tokio::fs::metadata(STANDBY_MODE_PATH).await.is_ok() {
        tokio::fs::remove_file(STANDBY_MODE_PATH).await?;
        Command::new("sync").invoke(ErrorKind::Filesystem).await?;
        embassy::sound::SHUTDOWN.play().await?;
        futures::future::pending::<()>().await;
    }

    embassy::sound::BEP.play().await?;

    run_script_if_exists("/embassy-os/preinit.sh").await;

    let res = if let Err(e) = setup_or_init(cfg_path).await {
        // Setup/init failed: switch nginx to the diagnostic UI and serve the
        // diagnostic API (seeded with the error) until shutdown is signaled.
        async {
            tracing::error!("{}", e.source);
            tracing::debug!("{}", e.source);
            embassy::sound::BEETHOVEN.play().await?;
            #[cfg(feature = "avahi")]
            let _mdns = MdnsController::init();
            tokio::fs::write(
                "/etc/nginx/sites-available/default",
                include_str!("../nginx/diagnostic-ui.conf"),
            )
            .await
            .with_ctx(|_| {
                (
                    embassy::ErrorKind::Filesystem,
                    "/etc/nginx/sites-available/default",
                )
            })?;
            Command::new("systemctl")
                .arg("reload")
                .arg("nginx")
                .invoke(embassy::ErrorKind::Nginx)
                .await?;
            let ctx = DiagnosticContext::init(
                cfg_path,
                // Pass the disk GUID along if one exists, so diagnostics can
                // identify the data disk.
                if tokio::fs::metadata("/embassy-os/disk.guid").await.is_ok() {
                    Some(Arc::new(
                        tokio::fs::read_to_string("/embassy-os/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
                            .await?
                            .trim()
                            .to_owned(),
                    ))
                } else {
                    None
                },
                e,
            )
            .await?;
            let mut shutdown_recv = ctx.shutdown.subscribe();
            rpc_server!({
                command: embassy::diagnostic_api,
                context: ctx.clone(),
                status: status_fn,
                middleware: [
                    cors,
                    diagnostic,
                ]
            })
            .with_graceful_shutdown({
                let mut shutdown = ctx.shutdown.subscribe();
                async move {
                    shutdown.recv().await.expect("context dropped");
                }
            })
            .await
            .with_kind(embassy::ErrorKind::Network)?;

            // The shutdown broadcast carries the requested action (if any).
            Ok::<_, Error>(
                shutdown_recv
                    .recv()
                    .await
                    .with_kind(embassy::ErrorKind::Network)?,
            )
        }
        .await
    } else {
        Ok(None)
    };

    run_script_if_exists("/embassy-os/postinit.sh").await;

    res
}
|
|
||||||
|
|
||||||
fn main() {
|
|
||||||
let matches = clap::App::new("embassyd")
|
|
||||||
.arg(
|
|
||||||
clap::Arg::with_name("config")
|
|
||||||
.short('c')
|
|
||||||
.long("config")
|
|
||||||
.takes_value(true),
|
|
||||||
)
|
|
||||||
.get_matches();
|
|
||||||
|
|
||||||
EmbassyLogger::init();
|
|
||||||
|
|
||||||
let cfg_path = matches.value_of("config");
|
|
||||||
let res = {
|
|
||||||
let rt = tokio::runtime::Builder::new_multi_thread()
|
|
||||||
.enable_all()
|
|
||||||
.build()
|
|
||||||
.expect("failed to initialize runtime");
|
|
||||||
rt.block_on(inner_main(cfg_path))
|
|
||||||
};
|
|
||||||
|
|
||||||
match res {
|
|
||||||
Ok(Some(shutdown)) => shutdown.execute(),
|
|
||||||
Ok(None) => (),
|
|
||||||
Err(e) => {
|
|
||||||
eprintln!("{}", e.source);
|
|
||||||
tracing::debug!("{:?}", e.source);
|
|
||||||
drop(e.source);
|
|
||||||
std::process::exit(e.kind as i32)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,60 +0,0 @@
|
|||||||
use embassy::context::SdkContext;
|
|
||||||
use embassy::util::logger::EmbassyLogger;
|
|
||||||
use embassy::version::{Current, VersionT};
|
|
||||||
use embassy::Error;
|
|
||||||
use rpc_toolkit::run_cli;
|
|
||||||
use rpc_toolkit::yajrc::RpcError;
|
|
||||||
use serde_json::Value;
|
|
||||||
|
|
||||||
lazy_static::lazy_static! {
    /// Semver of the current binary, cached as an owned `String` so clap can
    /// borrow it as the long-lived `version` string.
    static ref VERSION_STRING: String = Current::new().semver().to_string();
}
|
|
||||||
|
|
||||||
/// Entry point body for the Embassy SDK CLI: builds the clap app, sets a
/// quieter default log filter, initializes logging and the SDK context,
/// dispatches `embassy::portable_api` commands, and pretty-prints RPC
/// errors before exiting with the error code.
fn inner_main() -> Result<(), Error> {
    run_cli!({
        command: embassy::portable_api,
        app: app => app
            .name("Embassy SDK")
            .version(&**VERSION_STRING)
            .arg(
                clap::Arg::with_name("config")
                    .short('c')
                    .long("config")
                    .takes_value(true),
            ),
        context: matches => {
            // Default to warn-level logging unless the caller already set
            // RUST_LOG. (`is_err()` replaces the non-idiomatic
            // `if let Err(_) = ...` pattern match.)
            if std::env::var("RUST_LOG").is_err() {
                std::env::set_var("RUST_LOG", "embassy=warn,js_engine=warn");
            }
            EmbassyLogger::init();
            SdkContext::init(matches)?
        },
        // On RPC error: print `message: details` when structured data is
        // present, surface any "debug" payload at debug level, then exit
        // with the JSON-RPC error code.
        exit: |e: RpcError| {
            match e.data {
                Some(Value::String(s)) => eprintln!("{}: {}", e.message, s),
                Some(Value::Object(o)) => if let Some(Value::String(s)) = o.get("details") {
                    eprintln!("{}: {}", e.message, s);
                    if let Some(Value::String(s)) = o.get("debug") {
                        tracing::debug!("{}", s)
                    }
                }
                Some(a) => eprintln!("{}: {}", e.message, a),
                None => eprintln!("{}", e.message),
            }
            std::process::exit(e.code);
        }
    });
    Ok(())
}
|
|
||||||
|
|
||||||
fn main() {
|
|
||||||
match inner_main() {
|
|
||||||
Ok(_) => (),
|
|
||||||
Err(e) => {
|
|
||||||
eprintln!("{}", e.source);
|
|
||||||
tracing::debug!("{:?}", e.source);
|
|
||||||
drop(e.source);
|
|
||||||
std::process::exit(e.kind as i32)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,379 +0,0 @@
|
|||||||
use std::sync::Arc;
|
|
||||||
use std::time::Duration;
|
|
||||||
|
|
||||||
use color_eyre::eyre::eyre;
|
|
||||||
use embassy::context::{DiagnosticContext, RpcContext};
|
|
||||||
use embassy::core::rpc_continuations::RequestGuid;
|
|
||||||
use embassy::db::subscribe;
|
|
||||||
use embassy::middleware::auth::auth;
|
|
||||||
use embassy::middleware::cors::cors;
|
|
||||||
use embassy::middleware::db::db as db_middleware;
|
|
||||||
use embassy::middleware::diagnostic::diagnostic;
|
|
||||||
#[cfg(feature = "avahi")]
|
|
||||||
use embassy::net::mdns::MdnsController;
|
|
||||||
use embassy::net::tor::tor_health_check;
|
|
||||||
use embassy::shutdown::Shutdown;
|
|
||||||
use embassy::system::launch_metrics_task;
|
|
||||||
use embassy::util::logger::EmbassyLogger;
|
|
||||||
use embassy::util::{daemon, Invoke};
|
|
||||||
use embassy::{static_server, Error, ErrorKind, ResultExt};
|
|
||||||
use futures::{FutureExt, TryFutureExt};
|
|
||||||
use reqwest::{Client, Proxy};
|
|
||||||
use rpc_toolkit::hyper::{Body, Response, Server, StatusCode};
|
|
||||||
use rpc_toolkit::rpc_server;
|
|
||||||
use tokio::process::Command;
|
|
||||||
use tokio::signal::unix::signal;
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
/// rpc_toolkit status mapper: every RPC response is served as HTTP 200;
/// the JSON-RPC envelope carries the actual error code, not the HTTP status.
fn status_fn(_: i32) -> StatusCode {
    StatusCode::OK
}
|
|
||||||
|
|
||||||
fn err_to_500(e: Error) -> Response<Body> {
|
|
||||||
tracing::error!("{}", e);
|
|
||||||
tracing::debug!("{:?}", e);
|
|
||||||
Response::builder()
|
|
||||||
.status(StatusCode::INTERNAL_SERVER_ERROR)
|
|
||||||
.body(Body::empty())
|
|
||||||
.unwrap()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Main daemon loop: initializes the RPC context from the data-disk GUID,
/// wires up signal handling, nginx config, and five concurrent services
/// (RPC server, metrics task, WebSocket/continuation server, static file
/// server, Tor health daemon), then waits for all of them to shut down
/// gracefully and returns the requested post-shutdown action, if any.
#[instrument]
async fn inner_main(cfg_path: Option<&str>) -> Result<Option<Shutdown>, Error> {
    let (rpc_ctx, shutdown) = {
        let rpc_ctx = RpcContext::init(
            cfg_path,
            Arc::new(
                tokio::fs::read_to_string("/embassy-os/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
                    .await?
                    .trim()
                    .to_owned(),
            ),
        )
        .await?;
        let mut shutdown_recv = rpc_ctx.shutdown.subscribe();

        // SIGINT/SIGQUIT/SIGTERM all broadcast a `None` shutdown (i.e. plain
        // stop, no special action) through the context's shutdown channel.
        let sig_handler_ctx = rpc_ctx.clone();
        let sig_handler = tokio::spawn(async move {
            use tokio::signal::unix::SignalKind;
            futures::future::select_all(
                [
                    SignalKind::interrupt(),
                    SignalKind::quit(),
                    SignalKind::terminate(),
                ]
                .iter()
                .map(|s| {
                    async move {
                        signal(*s)
                            .expect(&format!("register {:?} handler", s))
                            .recv()
                            .await
                    }
                    .boxed()
                }),
            )
            .await;
            sig_handler_ctx
                .shutdown
                .send(None)
                .map_err(|_| ())
                .expect("send shutdown signal");
        });

        // One-time DB work before serving: sync the hostname and write the
        // nginx config, then release the handle.
        let mut db = rpc_ctx.db.handle();
        embassy::hostname::sync_hostname(&mut db).await?;
        let receipts = embassy::context::rpc::RpcSetNginxReceipts::new(&mut db).await?;

        rpc_ctx.set_nginx_conf(&mut db, receipts).await?;
        drop(db);
        let auth = auth(rpc_ctx.clone());
        let db_middleware = db_middleware(rpc_ctx.clone());
        let ctx = rpc_ctx.clone();
        // Main JSON-RPC server with CORS, auth, and DB middleware.
        let server = rpc_server!({
            command: embassy::main_api,
            context: ctx,
            status: status_fn,
            middleware: [
                cors,
                auth,
                db_middleware,
            ]
        })
        .with_graceful_shutdown({
            let mut shutdown = rpc_ctx.shutdown.subscribe();
            async move {
                shutdown.recv().await.expect("context dropped");
            }
        });

        let metrics_ctx = rpc_ctx.clone();
        let metrics_task = tokio::spawn(async move {
            launch_metrics_task(&metrics_ctx.metrics_cache, || {
                metrics_ctx.shutdown.subscribe()
            })
            .await
        });

        // Hand-rolled hyper server for WebSocket DB subscriptions and
        // long-running RPC continuations (both ws and rest flavors).
        let ws_ctx = rpc_ctx.clone();
        let ws_server = {
            let builder = Server::bind(&ws_ctx.bind_ws);

            let make_svc = ::rpc_toolkit::hyper::service::make_service_fn(move |_| {
                let ctx = ws_ctx.clone();
                async move {
                    Ok::<_, ::rpc_toolkit::hyper::Error>(::rpc_toolkit::hyper::service::service_fn(
                        move |req| {
                            let ctx = ctx.clone();
                            async move {
                                tracing::debug!("Request to {}", req.uri().path());
                                match req.uri().path() {
                                    // Live database subscription socket.
                                    "/ws/db" => {
                                        Ok(subscribe(ctx, req).await.unwrap_or_else(err_to_500))
                                    }
                                    // WebSocket continuation: look up the
                                    // handler registered under the GUID.
                                    path if path.starts_with("/ws/rpc/") => {
                                        match RequestGuid::from(
                                            path.strip_prefix("/ws/rpc/").unwrap(),
                                        ) {
                                            None => {
                                                tracing::debug!("No Guid Path");
                                                Response::builder()
                                                    .status(StatusCode::BAD_REQUEST)
                                                    .body(Body::empty())
                                            }
                                            Some(guid) => {
                                                match ctx.get_ws_continuation_handler(&guid).await {
                                                    Some(cont) => match cont(req).await {
                                                        Ok(r) => Ok(r),
                                                        Err(e) => Response::builder()
                                                            .status(
                                                                StatusCode::INTERNAL_SERVER_ERROR,
                                                            )
                                                            .body(Body::from(format!("{}", e))),
                                                    },
                                                    _ => Response::builder()
                                                        .status(StatusCode::NOT_FOUND)
                                                        .body(Body::empty()),
                                                }
                                            }
                                        }
                                    }
                                    // REST continuation: same pattern with the
                                    // rest-handler registry.
                                    path if path.starts_with("/rest/rpc/") => {
                                        match RequestGuid::from(
                                            path.strip_prefix("/rest/rpc/").unwrap(),
                                        ) {
                                            None => {
                                                tracing::debug!("No Guid Path");
                                                Response::builder()
                                                    .status(StatusCode::BAD_REQUEST)
                                                    .body(Body::empty())
                                            }
                                            Some(guid) => {
                                                match ctx.get_rest_continuation_handler(&guid).await
                                                {
                                                    None => Response::builder()
                                                        .status(StatusCode::NOT_FOUND)
                                                        .body(Body::empty()),
                                                    Some(cont) => match cont(req).await {
                                                        Ok(r) => Ok(r),
                                                        Err(e) => Response::builder()
                                                            .status(
                                                                StatusCode::INTERNAL_SERVER_ERROR,
                                                            )
                                                            .body(Body::from(format!("{}", e))),
                                                    },
                                                }
                                            }
                                        }
                                    }
                                    _ => Response::builder()
                                        .status(StatusCode::NOT_FOUND)
                                        .body(Body::empty()),
                                }
                            }
                        },
                    ))
                }
            });
            builder.serve(make_svc)
        }
        .with_graceful_shutdown({
            let mut shutdown = rpc_ctx.shutdown.subscribe();
            async move {
                shutdown.recv().await.expect("context dropped");
            }
        });

        let file_server_ctx = rpc_ctx.clone();
        let file_server = {
            static_server::init(file_server_ctx, {
                let mut shutdown = rpc_ctx.shutdown.subscribe();
                async move {
                    shutdown.recv().await.expect("context dropped");
                }
            })
        };

        // Periodic Tor health check through the local SOCKS proxy.
        let tor_health_ctx = rpc_ctx.clone();
        let tor_client = Client::builder()
            .proxy(
                Proxy::http(format!(
                    "socks5h://{}:{}",
                    rpc_ctx.tor_socks.ip(),
                    rpc_ctx.tor_socks.port()
                ))
                .with_kind(crate::ErrorKind::Network)?,
            )
            .build()
            .with_kind(crate::ErrorKind::Network)?;
        let tor_health_daemon = daemon(
            move || {
                let ctx = tor_health_ctx.clone();
                let client = tor_client.clone();
                async move { tor_health_check(&client, &ctx.net_controller.tor).await }
            },
            Duration::from_secs(300),
            rpc_ctx.shutdown.subscribe(),
        );

        embassy::sound::CHIME.play().await?;

        // Run all five services concurrently; the first error aborts the
        // join and propagates.
        futures::try_join!(
            server
                .map_err(|e| Error::new(e, ErrorKind::Network))
                .map_ok(|_| tracing::debug!("RPC Server Shutdown")),
            metrics_task
                .map_err(|e| Error::new(
                    eyre!("{}", e).wrap_err("Metrics daemon panicked!"),
                    ErrorKind::Unknown
                ))
                .map_ok(|_| tracing::debug!("Metrics daemon Shutdown")),
            ws_server
                .map_err(|e| Error::new(e, ErrorKind::Network))
                .map_ok(|_| tracing::debug!("WebSocket Server Shutdown")),
            file_server
                .map_err(|e| Error::new(e, ErrorKind::Network))
                .map_ok(|_| tracing::debug!("Static File Server Shutdown")),
            tor_health_daemon
                .map_err(|e| Error::new(
                    e.wrap_err("Tor Health daemon panicked!"),
                    ErrorKind::Unknown
                ))
                .map_ok(|_| tracing::debug!("Tor Health daemon Shutdown")),
        )?;

        // Retrieve the shutdown action that was broadcast (None = plain stop).
        let mut shutdown = shutdown_recv
            .recv()
            .await
            .with_kind(crate::ErrorKind::Unknown)?;

        sig_handler.abort();

        // Release the shutdown action's DB handle before tearing down the
        // context so the database can close cleanly.
        if let Some(shutdown) = &mut shutdown {
            drop(shutdown.db_handle.take());
        }

        (rpc_ctx, shutdown)
    };
    rpc_ctx.shutdown().await?;

    Ok(shutdown)
}
|
|
||||||
|
|
||||||
fn main() {
    // Parse CLI args: only `-c/--config <path>` is supported.
    let matches = clap::App::new("embassyd")
        .arg(
            clap::Arg::with_name("config")
                .short('c')
                .long("config")
                .takes_value(true),
        )
        .get_matches();

    EmbassyLogger::init();

    let cfg_path = matches.value_of("config");

    let res = {
        let rt = tokio::runtime::Builder::new_multi_thread()
            .enable_all()
            .build()
            .expect("failed to initialize runtime");
        rt.block_on(async {
            match inner_main(cfg_path).await {
                Ok(a) => Ok(a),
                // If the main daemon fails to start, fall back to a diagnostic
                // mode: play the error chime, serve the diagnostic UI through
                // nginx, and run a minimal diagnostic RPC server until a
                // shutdown is requested.
                Err(e) => {
                    // Immediately-invoked async closure so `?` can be used for
                    // the fallback path's own errors.
                    (|| async {
                        tracing::error!("{}", e.source);
                        tracing::debug!("{:?}", e.source);
                        embassy::sound::BEETHOVEN.play().await?;
                        #[cfg(feature = "avahi")]
                        let _mdns = MdnsController::init();
                        // Point nginx at the diagnostic UI and reload it.
                        tokio::fs::write(
                            "/etc/nginx/sites-available/default",
                            include_str!("../nginx/diagnostic-ui.conf"),
                        )
                        .await
                        .with_ctx(|_| {
                            (
                                embassy::ErrorKind::Filesystem,
                                "/etc/nginx/sites-available/default",
                            )
                        })?;
                        Command::new("systemctl")
                            .arg("reload")
                            .arg("nginx")
                            .invoke(embassy::ErrorKind::Nginx)
                            .await?;
                        let ctx = DiagnosticContext::init(
                            cfg_path,
                            // disk.guid uniquely identifies the volume group —
                            // keeps track of the disk that goes with this
                            // embassy; absent on first boot.
                            if tokio::fs::metadata("/embassy-os/disk.guid").await.is_ok() {
                                Some(Arc::new(
                                    tokio::fs::read_to_string("/embassy-os/disk.guid")
                                        .await?
                                        .trim()
                                        .to_owned(),
                                ))
                            } else {
                                None
                            },
                            e,
                        )
                        .await?;
                        let mut shutdown = ctx.shutdown.subscribe();
                        // Serve the diagnostic API until the context broadcasts
                        // a shutdown.
                        rpc_server!({
                            command: embassy::diagnostic_api,
                            context: ctx.clone(),
                            status: status_fn,
                            middleware: [
                                cors,
                                diagnostic,
                            ]
                        })
                        .with_graceful_shutdown({
                            let mut shutdown = ctx.shutdown.subscribe();
                            async move {
                                shutdown.recv().await.expect("context dropped");
                            }
                        })
                        .await
                        .with_kind(embassy::ErrorKind::Network)?;
                        // Propagate the shutdown action requested via the
                        // diagnostic API (if any) to the outer match.
                        Ok::<_, Error>(shutdown.recv().await.with_kind(crate::ErrorKind::Unknown)?)
                    })()
                    .await
                }
            }
        })
    };

    match res {
        // Clean exit, nothing further to do.
        Ok(None) => (),
        // A shutdown action (reboot/halt/…) was requested — execute it.
        Ok(Some(s)) => s.execute(),
        Err(e) => {
            eprintln!("{}", e.source);
            tracing::debug!("{:?}", e.source);
            drop(e.source);
            // Exit code encodes the error kind for supervising processes.
            std::process::exit(e.kind as i32)
        }
    }
}
|
|
||||||
@@ -1,120 +0,0 @@
|
|||||||
use std::collections::{BTreeMap, BTreeSet};
|
|
||||||
|
|
||||||
use color_eyre::eyre::eyre;
|
|
||||||
use nix::sys::signal::Signal;
|
|
||||||
use patch_db::HasModel;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use super::{Config, ConfigSpec};
|
|
||||||
use crate::context::RpcContext;
|
|
||||||
use crate::dependencies::Dependencies;
|
|
||||||
use crate::id::ImageId;
|
|
||||||
use crate::procedure::{PackageProcedure, ProcedureName};
|
|
||||||
use crate::s9pk::manifest::PackageId;
|
|
||||||
use crate::status::health_check::HealthCheckId;
|
|
||||||
use crate::util::Version;
|
|
||||||
use crate::volume::Volumes;
|
|
||||||
use crate::{Error, ResultExt};
|
|
||||||
|
|
||||||
/// Result of a package's config `get` procedure: the current configuration
/// (if any) together with the spec that describes its shape.
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct ConfigRes {
    // `None` when the package has not been configured yet.
    pub config: Option<Config>,
    pub spec: ConfigSpec,
}
|
|
||||||
|
|
||||||
/// The pair of package-defined procedures used to read (`get`) and apply
/// (`set`) a package's configuration.
#[derive(Clone, Debug, Deserialize, Serialize, HasModel)]
pub struct ConfigActions {
    pub get: PackageProcedure,
    pub set: PackageProcedure,
}
|
|
||||||
impl ConfigActions {
|
|
||||||
#[instrument]
|
|
||||||
pub fn validate(
|
|
||||||
&self,
|
|
||||||
eos_version: &Version,
|
|
||||||
volumes: &Volumes,
|
|
||||||
image_ids: &BTreeSet<ImageId>,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
self.get
|
|
||||||
.validate(eos_version, volumes, image_ids, true)
|
|
||||||
.with_ctx(|_| (crate::ErrorKind::ValidateS9pk, "Config Get"))?;
|
|
||||||
self.set
|
|
||||||
.validate(eos_version, volumes, image_ids, true)
|
|
||||||
.with_ctx(|_| (crate::ErrorKind::ValidateS9pk, "Config Set"))?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
#[instrument(skip(ctx))]
|
|
||||||
pub async fn get(
|
|
||||||
&self,
|
|
||||||
ctx: &RpcContext,
|
|
||||||
pkg_id: &PackageId,
|
|
||||||
pkg_version: &Version,
|
|
||||||
volumes: &Volumes,
|
|
||||||
) -> Result<ConfigRes, Error> {
|
|
||||||
self.get
|
|
||||||
.execute(
|
|
||||||
ctx,
|
|
||||||
pkg_id,
|
|
||||||
pkg_version,
|
|
||||||
ProcedureName::GetConfig,
|
|
||||||
volumes,
|
|
||||||
None::<()>,
|
|
||||||
false,
|
|
||||||
None,
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.and_then(|res| {
|
|
||||||
res.map_err(|e| Error::new(eyre!("{}", e.1), crate::ErrorKind::ConfigGen))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(skip(ctx))]
|
|
||||||
pub async fn set(
|
|
||||||
&self,
|
|
||||||
ctx: &RpcContext,
|
|
||||||
pkg_id: &PackageId,
|
|
||||||
pkg_version: &Version,
|
|
||||||
dependencies: &Dependencies,
|
|
||||||
volumes: &Volumes,
|
|
||||||
input: &Config,
|
|
||||||
) -> Result<SetResult, Error> {
|
|
||||||
let res: SetResult = self
|
|
||||||
.set
|
|
||||||
.execute(
|
|
||||||
ctx,
|
|
||||||
pkg_id,
|
|
||||||
pkg_version,
|
|
||||||
ProcedureName::SetConfig,
|
|
||||||
volumes,
|
|
||||||
Some(input),
|
|
||||||
false,
|
|
||||||
None,
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.and_then(|res| {
|
|
||||||
res.map_err(|e| {
|
|
||||||
Error::new(eyre!("{}", e.1), crate::ErrorKind::ConfigRulesViolation)
|
|
||||||
})
|
|
||||||
})?;
|
|
||||||
Ok(SetResult {
|
|
||||||
signal: res.signal,
|
|
||||||
depends_on: res
|
|
||||||
.depends_on
|
|
||||||
.into_iter()
|
|
||||||
.filter(|(pkg, _)| dependencies.0.contains_key(pkg))
|
|
||||||
.collect(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Result of a package's config `set` procedure.
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct SetResult {
    // Signal to deliver to the running service so it picks up the new config;
    // (de)serialized by signal name via the custom helpers below.
    #[serde(default)]
    #[serde(deserialize_with = "crate::util::serde::deserialize_from_str_opt")]
    #[serde(serialize_with = "crate::util::serde::serialize_display_opt")]
    pub signal: Option<Signal>,
    // Health checks of other packages this package now depends on.
    pub depends_on: BTreeMap<PackageId, BTreeSet<HealthCheckId>>,
}
|
|
||||||
@@ -1,824 +0,0 @@
|
|||||||
use std::collections::{BTreeMap, BTreeSet};
|
|
||||||
use std::path::PathBuf;
|
|
||||||
use std::time::Duration;
|
|
||||||
|
|
||||||
use color_eyre::eyre::eyre;
|
|
||||||
use futures::future::{BoxFuture, FutureExt};
|
|
||||||
use indexmap::IndexSet;
|
|
||||||
use itertools::Itertools;
|
|
||||||
use patch_db::{DbHandle, LockReceipt, LockTarget, LockTargetId, LockType, Verifier};
|
|
||||||
use rand::SeedableRng;
|
|
||||||
use regex::Regex;
|
|
||||||
use rpc_toolkit::command;
|
|
||||||
use serde_json::Value;
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use crate::context::RpcContext;
|
|
||||||
use crate::db::model::{CurrentDependencies, CurrentDependencyInfo, CurrentDependents};
|
|
||||||
use crate::dependencies::{
|
|
||||||
add_dependent_to_current_dependents_lists, break_transitive, heal_all_dependents_transitive,
|
|
||||||
BreakTransitiveReceipts, BreakageRes, Dependencies, DependencyConfig, DependencyError,
|
|
||||||
DependencyErrors, DependencyReceipt, TaggedDependencyError, TryHealReceipts,
|
|
||||||
};
|
|
||||||
use crate::install::cleanup::{remove_from_current_dependents_lists, UpdateDependencyReceipts};
|
|
||||||
use crate::s9pk::manifest::{Manifest, PackageId};
|
|
||||||
use crate::util::display_none;
|
|
||||||
use crate::util::serde::{display_serializable, parse_stdin_deserializable, IoFormat};
|
|
||||||
use crate::Error;
|
|
||||||
|
|
||||||
pub mod action;
|
|
||||||
pub mod spec;
|
|
||||||
pub mod util;
|
|
||||||
|
|
||||||
pub use spec::{ConfigSpec, Defaultable};
|
|
||||||
use util::NumRange;
|
|
||||||
|
|
||||||
use self::action::{ConfigActions, ConfigRes};
|
|
||||||
use self::spec::{ConfigPointerReceipts, PackagePointerSpec, ValueSpecPointer};
|
|
||||||
|
|
||||||
/// A package configuration: an arbitrary JSON object.
pub type Config = serde_json::Map<String, Value>;
|
|
||||||
/// Human-readable name for a JSON value's type, used in validation error
/// messages.
pub trait TypeOf {
    fn type_of(&self) -> &'static str;
}
|
|
||||||
impl TypeOf for Value {
|
|
||||||
fn type_of(&self) -> &'static str {
|
|
||||||
match self {
|
|
||||||
Value::Array(_) => "list",
|
|
||||||
Value::Bool(_) => "boolean",
|
|
||||||
Value::Null => "null",
|
|
||||||
Value::Number(_) => "number",
|
|
||||||
Value::Object(_) => "object",
|
|
||||||
Value::String(_) => "string",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Errors that can occur while generating, validating, or applying a package
/// configuration.
#[derive(Debug, thiserror::Error)]
pub enum ConfigurationError {
    #[error("Timeout Error")]
    TimeoutError(#[from] TimeoutError),
    #[error("No Match: {0}")]
    NoMatch(#[from] NoMatchWithPath),
    #[error("System Error: {0}")]
    SystemError(Error),
    #[error("Permission Denied: {0}")]
    PermissionDenied(ValueSpecPointer),
}
|
|
||||||
impl From<ConfigurationError> for Error {
|
|
||||||
fn from(err: ConfigurationError) -> Self {
|
|
||||||
let kind = match &err {
|
|
||||||
ConfigurationError::SystemError(e) => e.kind,
|
|
||||||
_ => crate::ErrorKind::ConfigGen,
|
|
||||||
};
|
|
||||||
crate::Error::new(err, kind)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Raised when config generation exceeds its allotted timeout.
#[derive(Clone, Copy, Debug, thiserror::Error)]
#[error("Timeout Error")]
pub struct TimeoutError;
|
|
||||||
|
|
||||||
/// A spec-mismatch error annotated with the path to the offending value
/// inside the config object.
#[derive(Clone, Debug, thiserror::Error)]
pub struct NoMatchWithPath {
    // Path segments, stored innermost-first (see `prepend` / `Display`).
    pub path: Vec<String>,
    pub error: MatchError,
}
|
|
||||||
impl NoMatchWithPath {
    /// A mismatch at the root (empty path).
    pub fn new(error: MatchError) -> Self {
        NoMatchWithPath {
            path: Vec::new(),
            error,
        }
    }
    /// Add an enclosing path segment. Called while unwinding out of nested
    /// spec checks, so segments accumulate innermost-first; `Display` reverses
    /// the vec to render them outermost-first.
    pub fn prepend(mut self, seg: String) -> Self {
        self.path.push(seg);
        self
    }
}
|
|
||||||
impl std::fmt::Display for NoMatchWithPath {
|
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
|
||||||
write!(f, "{}: {}", self.path.iter().rev().join("."), self.error)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl From<NoMatchWithPath> for Error {
|
|
||||||
fn from(e: NoMatchWithPath) -> Self {
|
|
||||||
ConfigurationError::from(e).into()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The ways a config value can fail to match its spec. The `#[error]` strings
/// are user-facing and surfaced through `NoMatchWithPath`.
#[derive(Clone, Debug, thiserror::Error)]
pub enum MatchError {
    #[error("String {0:?} Does Not Match Pattern {1}")]
    Pattern(String, Regex),
    #[error("String {0:?} Is Not In Enum {1:?}")]
    Enum(String, IndexSet<String>),
    #[error("Field Is Not Nullable")]
    NotNullable,
    #[error("Length Mismatch: expected {0}, actual: {1}")]
    LengthMismatch(NumRange<usize>, usize),
    #[error("Invalid Type: expected {0}, actual: {1}")]
    InvalidType(&'static str, &'static str),
    #[error("Number Out Of Range: expected {0}, actual: {1}")]
    OutOfRange(NumRange<f64>, f64),
    #[error("Number Is Not Integral: {0}")]
    NonIntegral(f64),
    #[error("Variant {0:?} Is Not In Union {1:?}")]
    Union(String, IndexSet<String>),
    #[error("Variant Is Missing Tag {0:?}")]
    MissingTag(String),
    #[error("Property {0:?} Of Variant {1:?} Conflicts With Union Tag")]
    PropertyMatchesUnionTag(String, String),
    #[error("Name of Property {0:?} Conflicts With Map Tag Name")]
    PropertyNameMatchesMapTag(String),
    #[error("Pointer Is Invalid: {0}")]
    InvalidPointer(spec::ValueSpecPointer),
    #[error("Object Key Is Invalid: {0}")]
    InvalidKey(String),
    #[error("Value In List Is Not Unique")]
    ListUniquenessViolation,
}
|
|
||||||
|
|
||||||
#[command(rename = "config-spec", cli_only, blocking, display(display_none))]
|
|
||||||
pub fn verify_spec(#[arg] path: PathBuf) -> Result<(), Error> {
|
|
||||||
let mut file = std::fs::File::open(&path)?;
|
|
||||||
let format = match path.extension().and_then(|s| s.to_str()) {
|
|
||||||
Some("yaml") | Some("yml") => IoFormat::Yaml,
|
|
||||||
Some("json") => IoFormat::Json,
|
|
||||||
Some("toml") => IoFormat::Toml,
|
|
||||||
Some("cbor") => IoFormat::Cbor,
|
|
||||||
_ => {
|
|
||||||
return Err(Error::new(
|
|
||||||
eyre!("Unknown file format. Expected one of yaml, json, toml, cbor."),
|
|
||||||
crate::ErrorKind::Deserialization,
|
|
||||||
));
|
|
||||||
}
|
|
||||||
};
|
|
||||||
let _: ConfigSpec = format.from_reader(&mut file)?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Parent command for `config get` / `config set`; simply forwards the
/// package id to the subcommand.
#[command(subcommands(get, set))]
pub fn config(#[arg] id: PackageId) -> Result<PackageId, Error> {
    Ok(id)
}
|
|
||||||
|
|
||||||
/// Lock receipts for the manifest fields needed to run a package's config
/// `get` procedure.
pub struct ConfigGetReceipts {
    manifest_volumes: LockReceipt<crate::volume::Volumes, ()>,
    manifest_version: LockReceipt<crate::util::Version, ()>,
    // `None` when the package declares no config actions.
    manifest_config: LockReceipt<Option<ConfigActions>, ()>,
}
|
|
||||||
|
|
||||||
impl ConfigGetReceipts {
|
|
||||||
pub async fn new<'a>(db: &'a mut impl DbHandle, id: &PackageId) -> Result<Self, Error> {
|
|
||||||
let mut locks = Vec::new();
|
|
||||||
|
|
||||||
let setup = Self::setup(&mut locks, id);
|
|
||||||
Ok(setup(&db.lock_all(locks).await?)?)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn setup(
|
|
||||||
locks: &mut Vec<LockTargetId>,
|
|
||||||
id: &PackageId,
|
|
||||||
) -> impl FnOnce(&Verifier) -> Result<Self, Error> {
|
|
||||||
let manifest_version = crate::db::DatabaseModel::new()
|
|
||||||
.package_data()
|
|
||||||
.idx_model(id)
|
|
||||||
.and_then(|x| x.installed())
|
|
||||||
.map(|x| x.manifest().version())
|
|
||||||
.make_locker(LockType::Write)
|
|
||||||
.add_to_keys(locks);
|
|
||||||
let manifest_volumes = crate::db::DatabaseModel::new()
|
|
||||||
.package_data()
|
|
||||||
.idx_model(id)
|
|
||||||
.and_then(|x| x.installed())
|
|
||||||
.map(|x| x.manifest().volumes())
|
|
||||||
.make_locker(LockType::Write)
|
|
||||||
.add_to_keys(locks);
|
|
||||||
let manifest_config = crate::db::DatabaseModel::new()
|
|
||||||
.package_data()
|
|
||||||
.idx_model(id)
|
|
||||||
.and_then(|x| x.installed())
|
|
||||||
.map(|x| x.manifest().config())
|
|
||||||
.make_locker(LockType::Write)
|
|
||||||
.add_to_keys(locks);
|
|
||||||
move |skeleton_key| {
|
|
||||||
Ok(Self {
|
|
||||||
manifest_volumes: manifest_volumes.verify(skeleton_key)?,
|
|
||||||
manifest_version: manifest_version.verify(skeleton_key)?,
|
|
||||||
manifest_config: manifest_config.verify(skeleton_key)?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[command(display(display_serializable))]
|
|
||||||
#[instrument(skip(ctx))]
|
|
||||||
pub async fn get(
|
|
||||||
#[context] ctx: RpcContext,
|
|
||||||
#[parent_data] id: PackageId,
|
|
||||||
#[allow(unused_variables)]
|
|
||||||
#[arg(long = "format")]
|
|
||||||
format: Option<IoFormat>,
|
|
||||||
) -> Result<ConfigRes, Error> {
|
|
||||||
let mut db = ctx.db.handle();
|
|
||||||
let receipts = ConfigGetReceipts::new(&mut db, &id).await?;
|
|
||||||
let action = receipts
|
|
||||||
.manifest_config
|
|
||||||
.get(&mut db)
|
|
||||||
.await?
|
|
||||||
.ok_or_else(|| Error::new(eyre!("{} has no config", id), crate::ErrorKind::NotFound))?;
|
|
||||||
|
|
||||||
let volumes = receipts.manifest_volumes.get(&mut db).await?;
|
|
||||||
let version = receipts.manifest_version.get(&mut db).await?;
|
|
||||||
action.get(&ctx, &id, &version, &volumes).await
|
|
||||||
}
|
|
||||||
|
|
||||||
/// `config set`: gather the CLI/stdin arguments and hand them to `set_impl`
/// (or `set_dry` for the `dry` subcommand).
#[command(
    subcommands(self(set_impl(async, context(RpcContext))), set_dry),
    display(display_none),
    metadata(sync_db = true)
)]
#[instrument]
pub fn set(
    #[parent_data] id: PackageId,
    #[allow(unused_variables)]
    #[arg(long = "format")]
    format: Option<IoFormat>,
    #[arg(long = "timeout")] timeout: Option<crate::util::serde::Duration>,
    // `None` when nothing is piped on stdin; downstream falls back to
    // generating a config from the spec.
    #[arg(stdin, parse(parse_stdin_deserializable))] config: Option<Config>,
) -> Result<(PackageId, Option<Config>, Option<Duration>), Error> {
    Ok((id, config, timeout.map(|d| *d)))
}
|
|
||||||
|
|
||||||
/// The new locking scheme finds all the locks an operation may need and lifts
/// them up into a single bundle acquired up front. That bundle is then passed
/// down into the functions that touch the db, so the operations run against
/// already-held locks instead of locking deep inside the system.
/// A lock receipt carries two types: the value type read from / written to the
/// db, and the key type that must be supplied at get/set time because the
/// lock path contains wildcards (`star()`).
pub struct ConfigReceipts {
    pub dependency_receipt: DependencyReceipt,
    pub config_receipts: ConfigPointerReceipts,
    pub update_dependency_receipts: UpdateDependencyReceipts,
    pub try_heal_receipts: TryHealReceipts,
    pub break_transitive_receipts: BreakTransitiveReceipts,
    // All receipts below are keyed by package id (the `star()` wildcard).
    configured: LockReceipt<bool, String>,
    config_actions: LockReceipt<ConfigActions, String>,
    dependencies: LockReceipt<Dependencies, String>,
    volumes: LockReceipt<crate::volume::Volumes, String>,
    version: LockReceipt<crate::util::Version, String>,
    manifest: LockReceipt<Manifest, String>,
    system_pointers: LockReceipt<Vec<spec::SystemPointerSpec>, String>,
    pub current_dependents: LockReceipt<CurrentDependents, String>,
    pub current_dependencies: LockReceipt<CurrentDependencies, String>,
    dependency_errors: LockReceipt<DependencyErrors, String>,
    // Keyed by (dependent package id, dependency package id).
    manifest_dependencies_config: LockReceipt<DependencyConfig, (String, String)>,
}
|
|
||||||
|
|
||||||
impl ConfigReceipts {
|
|
||||||
pub async fn new<'a>(db: &'a mut impl DbHandle) -> Result<Self, Error> {
|
|
||||||
let mut locks = Vec::new();
|
|
||||||
|
|
||||||
let setup = Self::setup(&mut locks);
|
|
||||||
Ok(setup(&db.lock_all(locks).await?)?)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn setup(locks: &mut Vec<LockTargetId>) -> impl FnOnce(&Verifier) -> Result<Self, Error> {
|
|
||||||
let dependency_receipt = DependencyReceipt::setup(locks);
|
|
||||||
let config_receipts = ConfigPointerReceipts::setup(locks);
|
|
||||||
let update_dependency_receipts = UpdateDependencyReceipts::setup(locks);
|
|
||||||
let break_transitive_receipts = BreakTransitiveReceipts::setup(locks);
|
|
||||||
let try_heal_receipts = TryHealReceipts::setup(locks);
|
|
||||||
|
|
||||||
let configured: LockTarget<bool, String> = crate::db::DatabaseModel::new()
|
|
||||||
.package_data()
|
|
||||||
.star()
|
|
||||||
.installed()
|
|
||||||
.map(|x| x.status().configured())
|
|
||||||
.make_locker(LockType::Write)
|
|
||||||
.add_to_keys(locks);
|
|
||||||
|
|
||||||
let config_actions = crate::db::DatabaseModel::new()
|
|
||||||
.package_data()
|
|
||||||
.star()
|
|
||||||
.installed()
|
|
||||||
.and_then(|x| x.manifest().config())
|
|
||||||
.make_locker(LockType::Read)
|
|
||||||
.add_to_keys(locks);
|
|
||||||
|
|
||||||
let dependencies = crate::db::DatabaseModel::new()
|
|
||||||
.package_data()
|
|
||||||
.star()
|
|
||||||
.installed()
|
|
||||||
.map(|x| x.manifest().dependencies())
|
|
||||||
.make_locker(LockType::Read)
|
|
||||||
.add_to_keys(locks);
|
|
||||||
|
|
||||||
let volumes = crate::db::DatabaseModel::new()
|
|
||||||
.package_data()
|
|
||||||
.star()
|
|
||||||
.installed()
|
|
||||||
.map(|x| x.manifest().volumes())
|
|
||||||
.make_locker(LockType::Read)
|
|
||||||
.add_to_keys(locks);
|
|
||||||
|
|
||||||
let version = crate::db::DatabaseModel::new()
|
|
||||||
.package_data()
|
|
||||||
.star()
|
|
||||||
.installed()
|
|
||||||
.map(|x| x.manifest().version())
|
|
||||||
.make_locker(LockType::Read)
|
|
||||||
.add_to_keys(locks);
|
|
||||||
|
|
||||||
let manifest = crate::db::DatabaseModel::new()
|
|
||||||
.package_data()
|
|
||||||
.star()
|
|
||||||
.installed()
|
|
||||||
.map(|x| x.manifest())
|
|
||||||
.make_locker(LockType::Read)
|
|
||||||
.add_to_keys(locks);
|
|
||||||
|
|
||||||
let system_pointers = crate::db::DatabaseModel::new()
|
|
||||||
.package_data()
|
|
||||||
.star()
|
|
||||||
.installed()
|
|
||||||
.map(|x| x.system_pointers())
|
|
||||||
.make_locker(LockType::Write)
|
|
||||||
.add_to_keys(locks);
|
|
||||||
|
|
||||||
let current_dependents = crate::db::DatabaseModel::new()
|
|
||||||
.package_data()
|
|
||||||
.star()
|
|
||||||
.installed()
|
|
||||||
.map(|x| x.current_dependents())
|
|
||||||
.make_locker(LockType::Write)
|
|
||||||
.add_to_keys(locks);
|
|
||||||
|
|
||||||
let current_dependencies = crate::db::DatabaseModel::new()
|
|
||||||
.package_data()
|
|
||||||
.star()
|
|
||||||
.installed()
|
|
||||||
.map(|x| x.current_dependencies())
|
|
||||||
.make_locker(LockType::Write)
|
|
||||||
.add_to_keys(locks);
|
|
||||||
|
|
||||||
let dependency_errors = crate::db::DatabaseModel::new()
|
|
||||||
.package_data()
|
|
||||||
.star()
|
|
||||||
.installed()
|
|
||||||
.map(|x| x.status().dependency_errors())
|
|
||||||
.make_locker(LockType::Write)
|
|
||||||
.add_to_keys(locks);
|
|
||||||
|
|
||||||
let manifest_dependencies_config = crate::db::DatabaseModel::new()
|
|
||||||
.package_data()
|
|
||||||
.star()
|
|
||||||
.installed()
|
|
||||||
.and_then(|x| x.manifest().dependencies().star().config())
|
|
||||||
.make_locker(LockType::Write)
|
|
||||||
.add_to_keys(locks);
|
|
||||||
|
|
||||||
move |skeleton_key| {
|
|
||||||
Ok(Self {
|
|
||||||
dependency_receipt: dependency_receipt(skeleton_key)?,
|
|
||||||
config_receipts: config_receipts(skeleton_key)?,
|
|
||||||
try_heal_receipts: try_heal_receipts(skeleton_key)?,
|
|
||||||
break_transitive_receipts: break_transitive_receipts(skeleton_key)?,
|
|
||||||
update_dependency_receipts: update_dependency_receipts(skeleton_key)?,
|
|
||||||
configured: configured.verify(skeleton_key)?,
|
|
||||||
config_actions: config_actions.verify(skeleton_key)?,
|
|
||||||
dependencies: dependencies.verify(skeleton_key)?,
|
|
||||||
volumes: volumes.verify(skeleton_key)?,
|
|
||||||
version: version.verify(skeleton_key)?,
|
|
||||||
manifest: manifest.verify(skeleton_key)?,
|
|
||||||
system_pointers: system_pointers.verify(skeleton_key)?,
|
|
||||||
current_dependents: current_dependents.verify(skeleton_key)?,
|
|
||||||
current_dependencies: current_dependencies.verify(skeleton_key)?,
|
|
||||||
dependency_errors: dependency_errors.verify(skeleton_key)?,
|
|
||||||
manifest_dependencies_config: manifest_dependencies_config.verify(skeleton_key)?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// `config set dry`: run the full configure flow inside a transaction that is
/// aborted at the end, so nothing is persisted and only the dependents that
/// WOULD break are reported.
#[command(rename = "dry", display(display_serializable))]
#[instrument(skip(ctx))]
pub async fn set_dry(
    #[context] ctx: RpcContext,
    #[parent_data] (id, config, timeout): (PackageId, Option<Config>, Option<Duration>),
) -> Result<BreakageRes, Error> {
    let mut db = ctx.db.handle();
    let mut tx = db.begin().await?;
    let mut breakages = BTreeMap::new();
    let locks = ConfigReceipts::new(&mut tx).await?;
    configure(
        &ctx,
        &mut tx,
        &id,
        config,
        &timeout,
        true, // dry_run
        &mut BTreeMap::new(),
        &mut breakages,
        &locks,
    )
    .await?;

    // NOTE(review): `configure` already sets `configured`, and the transaction
    // is aborted below anyway, so this extra set looks redundant — confirm.
    locks.configured.set(&mut tx, true, &id).await?;
    // Discard all changes made during the dry run.
    tx.abort().await?;
    Ok(BreakageRes(breakages))
}
|
|
||||||
|
|
||||||
#[instrument(skip(ctx))]
|
|
||||||
pub async fn set_impl(
|
|
||||||
ctx: RpcContext,
|
|
||||||
(id, config, timeout): (PackageId, Option<Config>, Option<Duration>),
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
let mut db = ctx.db.handle();
|
|
||||||
let mut tx = db.begin().await?;
|
|
||||||
let mut breakages = BTreeMap::new();
|
|
||||||
let locks = ConfigReceipts::new(&mut tx).await?;
|
|
||||||
configure(
|
|
||||||
&ctx,
|
|
||||||
&mut tx,
|
|
||||||
&id,
|
|
||||||
config,
|
|
||||||
&timeout,
|
|
||||||
false,
|
|
||||||
&mut BTreeMap::new(),
|
|
||||||
&mut breakages,
|
|
||||||
&locks,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
tx.commit().await?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Apply a (possibly absent) config to a package via `configure_rec`, then
/// mark the package as configured.
///
/// `overrides` caches configs already computed in this pass so dependents see
/// the new values; `breakages` accumulates dependents broken by the change;
/// `dry_run` skips the actual `set` procedure downstream.
#[instrument(skip(ctx, db, receipts))]
pub async fn configure<'a, Db: DbHandle>(
    ctx: &RpcContext,
    db: &'a mut Db,
    id: &PackageId,
    config: Option<Config>,
    timeout: &Option<Duration>,
    dry_run: bool,
    overrides: &mut BTreeMap<PackageId, Config>,
    breakages: &mut BTreeMap<PackageId, TaggedDependencyError>,
    receipts: &ConfigReceipts,
) -> Result<(), Error> {
    configure_rec(
        ctx, db, id, config, timeout, dry_run, overrides, breakages, receipts,
    )
    .await?;
    // Only flag as configured once the whole recursive walk has succeeded.
    receipts.configured.set(db, true, &id).await?;
    Ok(())
}
|
|
||||||
|
|
||||||
#[instrument(skip(ctx, db, receipts))]
|
|
||||||
pub fn configure_rec<'a, Db: DbHandle>(
|
|
||||||
ctx: &'a RpcContext,
|
|
||||||
db: &'a mut Db,
|
|
||||||
id: &'a PackageId,
|
|
||||||
config: Option<Config>,
|
|
||||||
timeout: &'a Option<Duration>,
|
|
||||||
dry_run: bool,
|
|
||||||
overrides: &'a mut BTreeMap<PackageId, Config>,
|
|
||||||
breakages: &'a mut BTreeMap<PackageId, TaggedDependencyError>,
|
|
||||||
receipts: &'a ConfigReceipts,
|
|
||||||
) -> BoxFuture<'a, Result<(), Error>> {
|
|
||||||
async move {
|
|
||||||
// fetch data from db
|
|
||||||
let action = receipts
|
|
||||||
.config_actions
|
|
||||||
.get(db, id)
|
|
||||||
.await?
|
|
||||||
.ok_or_else(not_found)?;
|
|
||||||
let dependencies = receipts
|
|
||||||
.dependencies
|
|
||||||
.get(db, id)
|
|
||||||
.await?
|
|
||||||
.ok_or_else(not_found)?;
|
|
||||||
let volumes = receipts.volumes.get(db, id).await?.ok_or_else(not_found)?;
|
|
||||||
let is_needs_config = !receipts
|
|
||||||
.configured
|
|
||||||
.get(db, id)
|
|
||||||
.await?
|
|
||||||
.ok_or_else(not_found)?;
|
|
||||||
let version = receipts.version.get(db, id).await?.ok_or_else(not_found)?;
|
|
||||||
|
|
||||||
// get current config and current spec
|
|
||||||
let ConfigRes {
|
|
||||||
config: old_config,
|
|
||||||
spec,
|
|
||||||
} = action.get(ctx, id, &version, &volumes).await?;
|
|
||||||
|
|
||||||
// determine new config to use
|
|
||||||
let mut config = if let Some(config) = config.or_else(|| old_config.clone()) {
|
|
||||||
config
|
|
||||||
} else {
|
|
||||||
spec.gen(&mut rand::rngs::StdRng::from_entropy(), timeout)?
|
|
||||||
};
|
|
||||||
|
|
||||||
let manifest = receipts.manifest.get(db, id).await?.ok_or_else(not_found)?;
|
|
||||||
|
|
||||||
spec.validate(&manifest)?;
|
|
||||||
spec.matches(&config)?; // check that new config matches spec
|
|
||||||
spec.update(
|
|
||||||
ctx,
|
|
||||||
db,
|
|
||||||
&manifest,
|
|
||||||
&*overrides,
|
|
||||||
&mut config,
|
|
||||||
&receipts.config_receipts,
|
|
||||||
)
|
|
||||||
.await?; // dereference pointers in the new config
|
|
||||||
|
|
||||||
// create backreferences to pointers
|
|
||||||
let mut sys = receipts
|
|
||||||
.system_pointers
|
|
||||||
.get(db, &id)
|
|
||||||
.await?
|
|
||||||
.ok_or_else(not_found)?;
|
|
||||||
sys.truncate(0);
|
|
||||||
let mut current_dependencies: CurrentDependencies = CurrentDependencies(
|
|
||||||
dependencies
|
|
||||||
.0
|
|
||||||
.iter()
|
|
||||||
.filter_map(|(id, info)| {
|
|
||||||
if info.requirement.required() {
|
|
||||||
Some((id.clone(), CurrentDependencyInfo::default()))
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.collect(),
|
|
||||||
);
|
|
||||||
for ptr in spec.pointers(&config)? {
|
|
||||||
match ptr {
|
|
||||||
ValueSpecPointer::Package(pkg_ptr) => {
|
|
||||||
if let Some(current_dependency) =
|
|
||||||
current_dependencies.0.get_mut(pkg_ptr.package_id())
|
|
||||||
{
|
|
||||||
current_dependency.pointers.push(pkg_ptr);
|
|
||||||
} else {
|
|
||||||
current_dependencies.0.insert(
|
|
||||||
pkg_ptr.package_id().to_owned(),
|
|
||||||
CurrentDependencyInfo {
|
|
||||||
pointers: vec![pkg_ptr],
|
|
||||||
health_checks: BTreeSet::new(),
|
|
||||||
},
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
ValueSpecPointer::System(s) => sys.push(s),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
receipts.system_pointers.set(db, sys, &id).await?;
|
|
||||||
|
|
||||||
let signal = if !dry_run {
|
|
||||||
// run config action
|
|
||||||
let res = action
|
|
||||||
.set(ctx, id, &version, &dependencies, &volumes, &config)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
// track dependencies with no pointers
|
|
||||||
for (package_id, health_checks) in res.depends_on.into_iter() {
|
|
||||||
if let Some(current_dependency) = current_dependencies.0.get_mut(&package_id) {
|
|
||||||
current_dependency.health_checks.extend(health_checks);
|
|
||||||
} else {
|
|
||||||
current_dependencies.0.insert(
|
|
||||||
package_id,
|
|
||||||
CurrentDependencyInfo {
|
|
||||||
pointers: Vec::new(),
|
|
||||||
health_checks,
|
|
||||||
},
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// track dependency health checks
|
|
||||||
current_dependencies = current_dependencies.map(|x| {
|
|
||||||
x.into_iter()
|
|
||||||
.filter(|(dep_id, _)| {
|
|
||||||
if dep_id != id && !manifest.dependencies.0.contains_key(dep_id) {
|
|
||||||
tracing::warn!("Illegal dependency specified: {}", dep_id);
|
|
||||||
false
|
|
||||||
} else {
|
|
||||||
true
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.collect()
|
|
||||||
});
|
|
||||||
res.signal
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
};
|
|
||||||
|
|
||||||
// update dependencies
|
|
||||||
let prev_current_dependencies = receipts
|
|
||||||
.current_dependencies
|
|
||||||
.get(db, &id)
|
|
||||||
.await?
|
|
||||||
.unwrap_or_default();
|
|
||||||
remove_from_current_dependents_lists(
|
|
||||||
db,
|
|
||||||
id,
|
|
||||||
&prev_current_dependencies,
|
|
||||||
&receipts.current_dependents,
|
|
||||||
)
|
|
||||||
.await?; // remove previous
|
|
||||||
add_dependent_to_current_dependents_lists(
|
|
||||||
db,
|
|
||||||
id,
|
|
||||||
¤t_dependencies,
|
|
||||||
&receipts.current_dependents,
|
|
||||||
)
|
|
||||||
.await?; // add new
|
|
||||||
current_dependencies.0.remove(id);
|
|
||||||
receipts
|
|
||||||
.current_dependencies
|
|
||||||
.set(db, current_dependencies.clone(), &id)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
let errs = receipts
|
|
||||||
.dependency_errors
|
|
||||||
.get(db, &id)
|
|
||||||
.await?
|
|
||||||
.ok_or_else(not_found)?;
|
|
||||||
tracing::warn!("Dependency Errors: {:?}", errs);
|
|
||||||
let errs = DependencyErrors::init(
|
|
||||||
ctx,
|
|
||||||
db,
|
|
||||||
&manifest,
|
|
||||||
¤t_dependencies,
|
|
||||||
&receipts.dependency_receipt.try_heal,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
receipts.dependency_errors.set(db, errs, &id).await?;
|
|
||||||
|
|
||||||
// cache current config for dependents
|
|
||||||
overrides.insert(id.clone(), config.clone());
|
|
||||||
|
|
||||||
// handle dependents
|
|
||||||
let dependents = receipts
|
|
||||||
.current_dependents
|
|
||||||
.get(db, id)
|
|
||||||
.await?
|
|
||||||
.ok_or_else(not_found)?;
|
|
||||||
let prev = if is_needs_config { None } else { old_config }
|
|
||||||
.map(Value::Object)
|
|
||||||
.unwrap_or_default();
|
|
||||||
let next = Value::Object(config.clone());
|
|
||||||
for (dependent, dep_info) in dependents.0.iter().filter(|(dep_id, _)| dep_id != &id) {
|
|
||||||
// check if config passes dependent check
|
|
||||||
if let Some(cfg) = receipts
|
|
||||||
.manifest_dependencies_config
|
|
||||||
.get(db, (&dependent, &id))
|
|
||||||
.await?
|
|
||||||
{
|
|
||||||
let manifest = receipts
|
|
||||||
.manifest
|
|
||||||
.get(db, &dependent)
|
|
||||||
.await?
|
|
||||||
.ok_or_else(not_found)?;
|
|
||||||
if let Err(error) = cfg
|
|
||||||
.check(
|
|
||||||
ctx,
|
|
||||||
dependent,
|
|
||||||
&manifest.version,
|
|
||||||
&manifest.volumes,
|
|
||||||
id,
|
|
||||||
&config,
|
|
||||||
)
|
|
||||||
.await?
|
|
||||||
{
|
|
||||||
let dep_err = DependencyError::ConfigUnsatisfied { error };
|
|
||||||
break_transitive(
|
|
||||||
db,
|
|
||||||
dependent,
|
|
||||||
id,
|
|
||||||
dep_err,
|
|
||||||
breakages,
|
|
||||||
&receipts.break_transitive_receipts,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
// handle backreferences
|
|
||||||
for ptr in &dep_info.pointers {
|
|
||||||
if let PackagePointerSpec::Config(cfg_ptr) = ptr {
|
|
||||||
if cfg_ptr.select(&next) != cfg_ptr.select(&prev) {
|
|
||||||
if let Err(e) = configure_rec(
|
|
||||||
ctx, db, dependent, None, timeout, dry_run, overrides, breakages,
|
|
||||||
receipts,
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
if e.kind == crate::ErrorKind::ConfigRulesViolation {
|
|
||||||
break_transitive(
|
|
||||||
db,
|
|
||||||
dependent,
|
|
||||||
id,
|
|
||||||
DependencyError::ConfigUnsatisfied {
|
|
||||||
error: format!("{}", e),
|
|
||||||
},
|
|
||||||
breakages,
|
|
||||||
&receipts.break_transitive_receipts,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
} else {
|
|
||||||
return Err(e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
heal_all_dependents_transitive(ctx, db, id, &receipts.dependency_receipt).await?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(signal) = signal {
|
|
||||||
match ctx.managers.get(&(id.clone(), version.clone())).await {
|
|
||||||
None => {
|
|
||||||
// in theory this should never happen, which indicates this function should be moved behind the
|
|
||||||
// Manager interface
|
|
||||||
return Err(Error::new(
|
|
||||||
eyre!("Manager Not Found for package being configured"),
|
|
||||||
crate::ErrorKind::Incoherent,
|
|
||||||
));
|
|
||||||
}
|
|
||||||
Some(m) => {
|
|
||||||
m.signal(&signal).await?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
.boxed()
|
|
||||||
}
|
|
||||||
#[instrument]
|
|
||||||
pub fn not_found() -> Error {
|
|
||||||
Error::new(eyre!("Could not find"), crate::ErrorKind::Incoherent)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// We want to have a double check that the paths are what we expect them to be.
|
|
||||||
/// Found that earlier the paths where not what we expected them to be.
|
|
||||||
#[tokio::test]
|
|
||||||
async fn ensure_creation_of_config_paths_makes_sense() {
|
|
||||||
let mut fake = patch_db::test_utils::NoOpDb();
|
|
||||||
let config_locks = ConfigReceipts::new(&mut fake).await.unwrap();
|
|
||||||
assert_eq!(
|
|
||||||
&format!("{}", config_locks.configured.lock.glob),
|
|
||||||
"/package-data/*/installed/status/configured"
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
&format!("{}", config_locks.config_actions.lock.glob),
|
|
||||||
"/package-data/*/installed/manifest/config"
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
&format!("{}", config_locks.dependencies.lock.glob),
|
|
||||||
"/package-data/*/installed/manifest/dependencies"
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
&format!("{}", config_locks.volumes.lock.glob),
|
|
||||||
"/package-data/*/installed/manifest/volumes"
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
&format!("{}", config_locks.version.lock.glob),
|
|
||||||
"/package-data/*/installed/manifest/version"
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
&format!("{}", config_locks.volumes.lock.glob),
|
|
||||||
"/package-data/*/installed/manifest/volumes"
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
&format!("{}", config_locks.manifest.lock.glob),
|
|
||||||
"/package-data/*/installed/manifest"
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
&format!("{}", config_locks.manifest.lock.glob),
|
|
||||||
"/package-data/*/installed/manifest"
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
&format!("{}", config_locks.system_pointers.lock.glob),
|
|
||||||
"/package-data/*/installed/system-pointers"
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
&format!("{}", config_locks.current_dependents.lock.glob),
|
|
||||||
"/package-data/*/installed/current-dependents"
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
&format!("{}", config_locks.dependency_errors.lock.glob),
|
|
||||||
"/package-data/*/installed/status/dependency-errors"
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
&format!("{}", config_locks.manifest_dependencies_config.lock.glob),
|
|
||||||
"/package-data/*/installed/manifest/dependencies/*/config"
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
&format!("{}", config_locks.system_pointers.lock.glob),
|
|
||||||
"/package-data/*/installed/system-pointers"
|
|
||||||
);
|
|
||||||
}
|
|
||||||
@@ -1,406 +0,0 @@
|
|||||||
use std::borrow::Cow;
|
|
||||||
use std::ops::{Bound, RangeBounds, RangeInclusive};
|
|
||||||
|
|
||||||
use rand::distributions::Distribution;
|
|
||||||
use rand::Rng;
|
|
||||||
use serde_json::Value;
|
|
||||||
|
|
||||||
use super::Config;
|
|
||||||
|
|
||||||
pub const STATIC_NULL: Value = Value::Null;
|
|
||||||
|
|
||||||
/// A weighted union of inclusive character ranges.
///
/// Each entry pairs a `RangeInclusive<char>` with the number of characters in
/// that range; the private second field caches the total count across all
/// ranges so `gen` can sample uniformly over the union.
#[derive(Clone, Debug)]
pub struct CharSet(pub Vec<(RangeInclusive<char>, usize)>, usize);
impl CharSet {
    /// Returns true if any of the ranges contains `c`.
    pub fn contains(&self, c: &char) -> bool {
        self.0.iter().any(|r| r.0.contains(c))
    }
    /// Samples one character uniformly across the union of all ranges.
    pub fn gen<R: Rng>(&self, rng: &mut R) -> char {
        // Pick a position in [0, total) and walk the ranges, subtracting each
        // range's size until the position lands inside one.
        let mut idx = rng.gen_range(0..self.1);
        for r in &self.0 {
            if idx < r.1 {
                // Sample a code point within the matched range. The unwrap
                // assumes every u32 in the range is a valid `char`; this holds
                // for the ASCII-bounded ranges produced by `Deserialize` —
                // NOTE(review): confirm for hand-constructed sets.
                return std::convert::TryFrom::try_from(
                    rand::distributions::Uniform::new_inclusive(
                        u32::from(*r.0.start()),
                        u32::from(*r.0.end()),
                    )
                    .sample(rng),
                )
                .unwrap();
            } else {
                idx -= r.1;
            }
        }
        // self.1 is the sum of all per-range sizes, so the loop always returns.
        unreachable!()
    }
}
|
|
||||||
impl Default for CharSet {
|
|
||||||
fn default() -> Self {
|
|
||||||
CharSet(vec![('!'..='~', 94)], 94)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl<'de> serde::de::Deserialize<'de> for CharSet {
    /// Parses a comma-separated list of entries, each either a single
    /// character (`a`) or an inclusive range (`a-z`). Literal `,` and `-` can
    /// appear as the characters themselves when the parser state allows it
    /// (e.g. `,` as a lone entry, `-` as a range endpoint).
    ///
    /// Invariant maintained: the returned `CharSet`'s second field is the sum
    /// of all per-range sizes, which `CharSet::gen` relies on.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::de::Deserializer<'de>,
    {
        let s = String::deserialize(deserializer)?;
        let mut res = Vec::new();
        let mut len = 0;
        // Parser state: `a` = pending range start, `b` = pending range end,
        // `in_range` = a '-' separator has been seen after `a`.
        let mut a: Option<char> = None;
        let mut b: Option<char> = None;
        let mut in_range = false;
        for c in s.chars() {
            match c {
                // ',' either terminates the current entry or is itself a
                // literal character, depending on state.
                ',' => match (a, b, in_range) {
                    // Complete range `a-b`: validate and commit.
                    (Some(start), Some(end), _) => {
                        // Only the end is checked for ASCII; since
                        // `start < end` is enforced below and all non-ASCII
                        // code points sort above ASCII, `start` is ASCII too.
                        if !end.is_ascii() {
                            return Err(serde::de::Error::custom("Invalid Character"));
                        }
                        if start >= end {
                            return Err(serde::de::Error::custom("Invalid Bounds"));
                        }
                        let l = u32::from(end) - u32::from(start) + 1;
                        res.push((start..=end, l as usize));
                        len += l as usize;
                        a = None;
                        b = None;
                        in_range = false;
                    }
                    // Single-character entry `a`.
                    (Some(start), None, false) => {
                        len += 1;
                        res.push((start..=start, 1));
                        a = None;
                    }
                    // `a-,`: the ',' is the literal range end.
                    (Some(_), None, true) => {
                        b = Some(',');
                    }
                    // Leading ',' is a literal range start.
                    (None, None, false) => {
                        a = Some(',');
                    }
                    _ => {
                        return Err(serde::de::Error::custom("Syntax Error"));
                    }
                },
                // '-' is the range separator, or a literal endpoint when the
                // separator position is already consumed.
                '-' => {
                    if a.is_none() {
                        a = Some('-');
                    } else if !in_range {
                        in_range = true;
                    } else if b.is_none() {
                        b = Some('-')
                    } else {
                        return Err(serde::de::Error::custom("Syntax Error"));
                    }
                }
                // Any other character fills the next open slot.
                _ => {
                    if a.is_none() {
                        a = Some(c);
                    } else if in_range && b.is_none() {
                        b = Some(c);
                    } else {
                        return Err(serde::de::Error::custom("Syntax Error"));
                    }
                }
            }
        }
        // Flush the final entry (input does not need a trailing comma).
        match (a, b) {
            (Some(start), Some(end)) => {
                if !end.is_ascii() {
                    return Err(serde::de::Error::custom("Invalid Character"));
                }
                if start >= end {
                    return Err(serde::de::Error::custom("Invalid Bounds"));
                }
                let l = u32::from(end) - u32::from(start) + 1;
                res.push((start..=end, l as usize));
                len += l as usize;
            }
            (Some(c), None) => {
                len += 1;
                res.push((c..=c, 1));
            }
            _ => (),
        }

        Ok(CharSet(res, len))
    }
}
|
|
||||||
impl serde::ser::Serialize for CharSet {
    /// Serializes back to the textual form accepted by `deserialize`:
    /// comma-separated entries, each a single character or `start-end`.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::ser::Serializer,
    {
        let mut parts = Vec::with_capacity(self.0.len());
        for (range, size) in &self.0 {
            let rendered = if *size == 1 {
                format!("{}", range.start())
            } else {
                format!("{}-{}", range.start(), range.end())
            };
            parts.push(rendered);
        }
        serializer.serialize_str(parts.join(",").as_str())
    }
}
|
|
||||||
|
|
||||||
/// Deep-merge support for JSON-like values.
pub trait MergeWith {
    /// Merges `other` into `self` in place; existing non-object values in
    /// `self` win over `other`'s.
    fn merge_with(&mut self, other: &serde_json::Value);
}
|
|
||||||
|
|
||||||
impl MergeWith for serde_json::Value {
    /// Recursive object merge:
    /// - does nothing unless both `self` and `other` are objects
    /// - keys absent from `self` are cloned in from `other`
    /// - keys present in both with object values on both sides merge recursively
    /// - otherwise the value already in `self` is kept
    fn merge_with(&mut self, other: &serde_json::Value) {
        use serde_json::Value::Object;
        if let (Object(dest), Object(src)) = (self, other) {
            for (key, incoming) in src.iter() {
                match (dest.get_mut(key), incoming) {
                    (Some(existing @ Object(_)), Object(_)) => {
                        existing.merge_with(incoming);
                    }
                    (None, _) => {
                        dest.insert(key.clone(), incoming.clone());
                    }
                    _ => (),
                }
            }
        }
    }
}
|
|
||||||
|
|
||||||
/// Exercises the `MergeWith` rules: existing scalars/arrays in the target win,
/// objects merge recursively, and missing keys are copied over.
#[test]
fn merge_with_tests() {
    use serde_json::json;

    let mut a = json!(
        {"a": 1, "c": {"d": "123"}, "i": [1,2,3], "j": {}, "k":[1,2,3], "l": "test"}
    );
    a.merge_with(
        &json!({"a":"a", "b": "b", "c":{"d":"d", "e":"e"}, "f":{"g":"g"}, "h": [1,2,3], "i":"i", "j":[1,2,3], "k":{}}),
    );
    // "a" keeps 1 (target wins), "c" merges recursively keeping "d",
    // "b"/"f"/"h" are copied in, "i"/"j"/"k" keep the target's type/value.
    assert_eq!(
        a,
        json!({"a": 1, "c": {"d": "123", "e":"e"}, "b":"b", "f": {"g":"g"}, "h":[1,2,3], "i":[1,2,3], "j": {}, "k":[1,2,3], "l": "test"})
    )
}
|
|
||||||
pub mod serde_regex {
|
|
||||||
use regex::Regex;
|
|
||||||
use serde::*;
|
|
||||||
|
|
||||||
pub fn serialize<S>(regex: &Regex, serializer: S) -> Result<S::Ok, S::Error>
|
|
||||||
where
|
|
||||||
S: Serializer,
|
|
||||||
{
|
|
||||||
<&str>::serialize(®ex.as_str(), serializer)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn deserialize<'de, D>(deserializer: D) -> Result<Regex, D::Error>
|
|
||||||
where
|
|
||||||
D: Deserializer<'de>,
|
|
||||||
{
|
|
||||||
let s = String::deserialize(deserializer)?;
|
|
||||||
Regex::new(&s).map_err(|e| de::Error::custom(e))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A numeric interval with independently inclusive/exclusive/unbounded
/// endpoints, written in interval notation, e.g. `[0,10)` or `(*,5]`.
#[derive(Clone, Debug)]
pub struct NumRange<T: std::str::FromStr + std::fmt::Display + std::cmp::PartialOrd>(
    pub (Bound<T>, Bound<T>),
);
|
|
||||||
impl<T> std::ops::Deref for NumRange<T>
|
|
||||||
where
|
|
||||||
T: std::str::FromStr + std::fmt::Display + std::cmp::PartialOrd,
|
|
||||||
{
|
|
||||||
type Target = (Bound<T>, Bound<T>);
|
|
||||||
|
|
||||||
fn deref(&self) -> &Self::Target {
|
|
||||||
&self.0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl<'de, T> serde::de::Deserialize<'de> for NumRange<T>
|
|
||||||
where
|
|
||||||
T: std::str::FromStr + std::fmt::Display + std::cmp::PartialOrd,
|
|
||||||
<T as std::str::FromStr>::Err: std::fmt::Display,
|
|
||||||
{
|
|
||||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
|
||||||
where
|
|
||||||
D: serde::de::Deserializer<'de>,
|
|
||||||
{
|
|
||||||
let s = String::deserialize(deserializer)?;
|
|
||||||
let mut split = s.split(",");
|
|
||||||
let start = split
|
|
||||||
.next()
|
|
||||||
.map(|s| match s.get(..1) {
|
|
||||||
Some("(") => match s.get(1..2) {
|
|
||||||
Some("*") => Ok(Bound::Unbounded),
|
|
||||||
_ => s[1..]
|
|
||||||
.trim()
|
|
||||||
.parse()
|
|
||||||
.map(Bound::Excluded)
|
|
||||||
.map_err(|e| serde::de::Error::custom(e)),
|
|
||||||
},
|
|
||||||
Some("[") => s[1..]
|
|
||||||
.trim()
|
|
||||||
.parse()
|
|
||||||
.map(Bound::Included)
|
|
||||||
.map_err(|e| serde::de::Error::custom(e)),
|
|
||||||
_ => Err(serde::de::Error::custom(format!(
|
|
||||||
"Could not parse left bound: {}",
|
|
||||||
s
|
|
||||||
))),
|
|
||||||
})
|
|
||||||
.transpose()?
|
|
||||||
.unwrap();
|
|
||||||
let end = split
|
|
||||||
.next()
|
|
||||||
.map(|s| match s.get(s.len() - 1..) {
|
|
||||||
Some(")") => match s.get(s.len() - 2..s.len() - 1) {
|
|
||||||
Some("*") => Ok(Bound::Unbounded),
|
|
||||||
_ => s[..s.len() - 1]
|
|
||||||
.trim()
|
|
||||||
.parse()
|
|
||||||
.map(Bound::Excluded)
|
|
||||||
.map_err(|e| serde::de::Error::custom(e)),
|
|
||||||
},
|
|
||||||
Some("]") => s[..s.len() - 1]
|
|
||||||
.trim()
|
|
||||||
.parse()
|
|
||||||
.map(Bound::Included)
|
|
||||||
.map_err(|e| serde::de::Error::custom(e)),
|
|
||||||
_ => Err(serde::de::Error::custom(format!(
|
|
||||||
"Could not parse right bound: {}",
|
|
||||||
s
|
|
||||||
))),
|
|
||||||
})
|
|
||||||
.transpose()?
|
|
||||||
.unwrap_or(Bound::Unbounded);
|
|
||||||
|
|
||||||
Ok(NumRange((start, end)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl<T> std::fmt::Display for NumRange<T>
|
|
||||||
where
|
|
||||||
T: std::str::FromStr + std::fmt::Display + std::cmp::PartialOrd,
|
|
||||||
{
|
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
|
||||||
match self.start_bound() {
|
|
||||||
Bound::Excluded(n) => write!(f, "({},", n)?,
|
|
||||||
Bound::Included(n) => write!(f, "[{},", n)?,
|
|
||||||
Bound::Unbounded => write!(f, "(*,")?,
|
|
||||||
};
|
|
||||||
match self.end_bound() {
|
|
||||||
Bound::Excluded(n) => write!(f, "{})", n),
|
|
||||||
Bound::Included(n) => write!(f, "{}]", n),
|
|
||||||
Bound::Unbounded => write!(f, "*)"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl<T> serde::ser::Serialize for NumRange<T>
|
|
||||||
where
|
|
||||||
T: std::str::FromStr + std::fmt::Display + std::cmp::PartialOrd,
|
|
||||||
{
|
|
||||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
|
||||||
where
|
|
||||||
S: serde::ser::Serializer,
|
|
||||||
{
|
|
||||||
<&str>::serialize(&format!("{}", self).as_str(), serializer)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Rule describing when two config entries count as duplicates.
#[derive(Clone, Debug)]
pub enum UniqueBy {
    /// Duplicates if ANY sub-rule matches.
    Any(Vec<UniqueBy>),
    /// Duplicates only if ALL sub-rules match.
    All(Vec<UniqueBy>),
    /// Duplicates if the named key has equal values on both sides.
    Exactly(String),
    /// Never considered duplicates (the default).
    NotUnique,
}
|
|
||||||
impl UniqueBy {
|
|
||||||
pub fn eq(&self, lhs: &Config, rhs: &Config) -> bool {
|
|
||||||
match self {
|
|
||||||
UniqueBy::Any(any) => any.iter().any(|u| u.eq(lhs, rhs)),
|
|
||||||
UniqueBy::All(all) => all.iter().all(|u| u.eq(lhs, rhs)),
|
|
||||||
UniqueBy::Exactly(key) => lhs.get(key) == rhs.get(key),
|
|
||||||
UniqueBy::NotUnique => false,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl Default for UniqueBy {
|
|
||||||
fn default() -> Self {
|
|
||||||
UniqueBy::NotUnique
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl<'de> serde::de::Deserialize<'de> for UniqueBy {
    /// Accepts several shapes:
    /// - a bare string  -> `Exactly(key)`
    /// - `{"any": [...]}` -> `Any`
    /// - `{"all": [...]}` -> `All`
    /// - unit / none     -> `NotUnique`
    fn deserialize<D: serde::de::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        struct Visitor;
        impl<'de> serde::de::Visitor<'de> for Visitor {
            type Value = UniqueBy;
            fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
                write!(formatter, "a key, an \"any\" object, or an \"all\" object")
            }
            fn visit_str<E: serde::de::Error>(self, v: &str) -> Result<Self::Value, E> {
                Ok(UniqueBy::Exactly(v.to_owned()))
            }
            fn visit_string<E: serde::de::Error>(self, v: String) -> Result<Self::Value, E> {
                Ok(UniqueBy::Exactly(v))
            }
            fn visit_map<A: serde::de::MapAccess<'de>>(
                self,
                mut map: A,
            ) -> Result<Self::Value, A::Error> {
                // Scan keys for "any"/"all"; the first match wins and its
                // value is deserialized as the rule list. Unrecognized keys
                // are remembered only so the error can name one of them.
                //
                // NOTE(review): looping on `next_key` without consuming the
                // corresponding value is format-dependent behavior in serde;
                // confirm the formats used here tolerate it.
                let mut variant = None;
                while let Some(key) = map.next_key::<Cow<str>>()? {
                    match key.as_ref() {
                        "any" => {
                            return Ok(UniqueBy::Any(map.next_value()?));
                        }
                        "all" => {
                            return Ok(UniqueBy::All(map.next_value()?));
                        }
                        _ => {
                            variant = Some(key);
                        }
                    }
                }
                // No "any"/"all" key found: report the last unknown key (or
                // an empty string for an empty map).
                Err(serde::de::Error::unknown_variant(
                    variant.unwrap_or_default().as_ref(),
                    &["any", "all"],
                ))
            }
            fn visit_unit<E: serde::de::Error>(self) -> Result<Self::Value, E> {
                Ok(UniqueBy::NotUnique)
            }
            fn visit_none<E: serde::de::Error>(self) -> Result<Self::Value, E> {
                Ok(UniqueBy::NotUnique)
            }
        }
        deserializer.deserialize_any(Visitor)
    }
}
|
|
||||||
|
|
||||||
impl serde::ser::Serialize for UniqueBy {
|
|
||||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
|
||||||
where
|
|
||||||
S: serde::ser::Serializer,
|
|
||||||
{
|
|
||||||
use serde::ser::SerializeMap;
|
|
||||||
|
|
||||||
match self {
|
|
||||||
UniqueBy::Any(any) => {
|
|
||||||
let mut map = serializer.serialize_map(Some(1))?;
|
|
||||||
map.serialize_key("any")?;
|
|
||||||
map.serialize_value(any)?;
|
|
||||||
map.end()
|
|
||||||
}
|
|
||||||
UniqueBy::All(all) => {
|
|
||||||
let mut map = serializer.serialize_map(Some(1))?;
|
|
||||||
map.serialize_key("all")?;
|
|
||||||
map.serialize_value(all)?;
|
|
||||||
map.end()
|
|
||||||
}
|
|
||||||
UniqueBy::Exactly(key) => serializer.serialize_str(key),
|
|
||||||
UniqueBy::NotUnique => serializer.serialize_unit(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,169 +0,0 @@
|
|||||||
use std::fs::File;
|
|
||||||
use std::io::BufReader;
|
|
||||||
use std::net::{Ipv4Addr, SocketAddr};
|
|
||||||
use std::path::{Path, PathBuf};
|
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use clap::ArgMatches;
|
|
||||||
use color_eyre::eyre::eyre;
|
|
||||||
use cookie_store::CookieStore;
|
|
||||||
use reqwest::Proxy;
|
|
||||||
use reqwest_cookie_store::CookieStoreMutex;
|
|
||||||
use rpc_toolkit::reqwest::{Client, Url};
|
|
||||||
use rpc_toolkit::url::Host;
|
|
||||||
use rpc_toolkit::Context;
|
|
||||||
use serde::Deserialize;
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use crate::util::config::{load_config_from_paths, local_config_path};
|
|
||||||
use crate::ResultExt;
|
|
||||||
|
|
||||||
/// On-disk configuration for the CLI context, merged from the config file(s).
#[derive(Debug, Default, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct CliContextConfig {
    // Address the RPC server is bound to; used to build the default host URL
    // when `host` is not set.
    pub bind_rpc: Option<SocketAddr>,
    // Full base URL of the server; overrides `bind_rpc`.
    pub host: Option<Url>,
    // Optional proxy URL, parsed from a string field; presumably an empty
    // value maps to `None` (see `test_cli_proxy_empty`) — confirm in
    // `deserialize_from_str_opt`.
    #[serde(deserialize_with = "crate::util::serde::deserialize_from_str_opt")]
    #[serde(default)]
    pub proxy: Option<Url>,
    // Where to persist the cookie jar; defaults to `.cookies.json` next to
    // the config file (see `CliContext::init`).
    pub cookie_path: Option<PathBuf>,
}
|
|
||||||
|
|
||||||
/// Shared state behind a `CliContext` (held in an `Arc`).
#[derive(Debug)]
pub struct CliContextSeed {
    // Server base URL (scheme/host/port).
    pub base_url: Url,
    // `base_url` with `/rpc/v1` appended.
    pub rpc_url: Url,
    // HTTP client wired to `cookie_store` (and the configured proxy, if any).
    pub client: Client,
    // In-memory cookie jar; persisted to `cookie_path` on drop.
    pub cookie_store: Arc<CookieStoreMutex>,
    // File the cookie jar is saved to when the seed is dropped.
    pub cookie_path: PathBuf,
}
|
|
||||||
impl Drop for CliContextSeed {
    /// Persists the cookie jar to `cookie_path` on shutdown, writing to a
    /// `.tmp` file under an exclusive lock and then renaming it into place so
    /// the saved file is never observed half-written.
    ///
    /// NOTE(review): every step `unwrap`s; a panic inside `Drop` during an
    /// unwind aborts the process. Consider logging failures instead — confirm
    /// intended behavior before changing.
    fn drop(&mut self) {
        let tmp = format!("{}.tmp", self.cookie_path.display());
        let parent_dir = self.cookie_path.parent().unwrap_or(Path::new("/"));
        if !parent_dir.exists() {
            std::fs::create_dir_all(&parent_dir).unwrap();
        }
        // Exclusive advisory lock on the temp file while writing.
        let mut writer = fd_lock_rs::FdLock::lock(
            File::create(&tmp).unwrap(),
            fd_lock_rs::LockType::Exclusive,
            true,
        )
        .unwrap();
        let store = self.cookie_store.lock().unwrap();
        store.save_json(&mut *writer).unwrap();
        // Flush to disk before the rename makes the file visible.
        writer.sync_all().unwrap();
        std::fs::rename(tmp, &self.cookie_path).unwrap();
    }
}
|
|
||||||
|
|
||||||
// Fallbacks used by the `Context` impl when the base URL lacks an explicit
// host or port.
const DEFAULT_HOST: Host<&'static str> = Host::Ipv4(Ipv4Addr::new(127, 0, 0, 1));
const DEFAULT_PORT: u16 = 5959;
|
|
||||||
|
|
||||||
/// Cheaply clonable handle to the CLI runtime state.
#[derive(Debug, Clone)]
pub struct CliContext(Arc<CliContextSeed>);
impl CliContext {
    /// Builds the CLI context from CLI flags and layered config files.
    ///
    /// BLOCKING: performs synchronous filesystem I/O (config + cookie jar).
    #[instrument(skip(matches))]
    pub fn init(matches: &ArgMatches) -> Result<Self, crate::Error> {
        let local_config_path = local_config_path();
        // Config precedence: explicit --config paths, then the local config,
        // then the compiled-in default path.
        let base: CliContextConfig = load_config_from_paths(
            matches
                .values_of("config")
                .into_iter()
                .flatten()
                .map(|p| Path::new(p))
                .chain(local_config_path.as_deref().into_iter())
                .chain(std::iter::once(Path::new(crate::util::config::CONFIG_PATH))),
        )?;
        // Host precedence: --host flag, config `host`, else derive from
        // `bind-rpc` (defaulting to http://127.0.0.1:80).
        let mut url = if let Some(host) = matches.value_of("host") {
            host.parse()?
        } else if let Some(host) = base.host {
            host
        } else {
            format!(
                "http://{}",
                base.bind_rpc.unwrap_or(([127, 0, 0, 1], 80).into())
            )
            .parse()?
        };
        // Proxy precedence: --proxy flag, else config `proxy`.
        let proxy = if let Some(proxy) = matches.value_of("proxy") {
            Some(proxy.parse()?)
        } else {
            base.proxy
        };

        // Cookie jar location defaults to `.cookies.json` beside the config.
        let cookie_path = base.cookie_path.unwrap_or_else(|| {
            local_config_path
                .as_deref()
                .unwrap_or_else(|| Path::new(crate::util::config::CONFIG_PATH))
                .parent()
                .unwrap_or(Path::new("/"))
                .join(".cookies.json")
        });
        // Load the persisted jar if present, otherwise start empty.
        let cookie_store = Arc::new(CookieStoreMutex::new(if cookie_path.exists() {
            CookieStore::load_json(BufReader::new(File::open(&cookie_path)?))
                .map_err(|e| eyre!("{}", e))
                .with_kind(crate::ErrorKind::Deserialization)?
        } else {
            CookieStore::default()
        }));
        Ok(CliContext(Arc::new(CliContextSeed {
            base_url: url.clone(),
            // rpc_url = base_url + "/rpc/v1".
            rpc_url: {
                url.path_segments_mut()
                    .map_err(|_| eyre!("Url cannot be base"))
                    .with_kind(crate::ErrorKind::ParseUrl)?
                    .push("rpc")
                    .push("v1");
                url
            },
            client: {
                let mut builder = Client::builder().cookie_provider(cookie_store.clone());
                if let Some(proxy) = proxy {
                    builder =
                        builder.proxy(Proxy::all(proxy).with_kind(crate::ErrorKind::ParseUrl)?)
                }
                builder.build().expect("cannot fail")
            },
            cookie_store,
            cookie_path,
        })))
    }
}
|
|
||||||
impl std::ops::Deref for CliContext {
|
|
||||||
type Target = CliContextSeed;
|
|
||||||
fn deref(&self) -> &Self::Target {
|
|
||||||
&*self.0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
/// `rpc_toolkit::Context` wiring: scheme/host/port come from `base_url`
/// (with compiled-in fallbacks), while the request path/URL come from
/// `rpc_url`.
impl Context for CliContext {
    fn protocol(&self) -> &str {
        self.0.base_url.scheme()
    }
    fn host(&self) -> Host<&str> {
        // Falls back to 127.0.0.1 when the URL has no host component.
        self.0.base_url.host().unwrap_or(DEFAULT_HOST)
    }
    fn port(&self) -> u16 {
        // Falls back to 5959 when the URL has no explicit port.
        self.0.base_url.port().unwrap_or(DEFAULT_PORT)
    }
    fn path(&self) -> &str {
        self.0.rpc_url.path()
    }
    fn url(&self) -> Url {
        self.0.rpc_url.clone()
    }
    fn client(&self) -> &Client {
        &self.0.client
    }
}
|
|
||||||
/// Regression test: a config with fields left empty used to fail to
/// deserialize; an empty value must parse cleanly (with `proxy` absent
/// falling back to its default of `None`).
#[test]
fn test_cli_proxy_empty() {
    serde_yaml::from_str::<CliContextConfig>(
        "
bind_rpc:
",
    )
    .unwrap();
}
|
|
||||||
@@ -1,95 +0,0 @@
|
|||||||
use std::net::{IpAddr, SocketAddr};
|
|
||||||
use std::ops::Deref;
|
|
||||||
use std::path::{Path, PathBuf};
|
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use rpc_toolkit::yajrc::RpcError;
|
|
||||||
use rpc_toolkit::Context;
|
|
||||||
use serde::Deserialize;
|
|
||||||
use tokio::fs::File;
|
|
||||||
use tokio::sync::broadcast::Sender;
|
|
||||||
use tracing::instrument;
|
|
||||||
use url::Host;
|
|
||||||
|
|
||||||
use crate::shutdown::Shutdown;
|
|
||||||
use crate::util::io::from_toml_async_reader;
|
|
||||||
use crate::util::AsyncFileExt;
|
|
||||||
use crate::{Error, ResultExt};
|
|
||||||
|
|
||||||
/// On-disk configuration for the diagnostic context.
#[derive(Debug, Default, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct DiagnosticContextConfig {
    // Address for the diagnostic RPC server; defaults to 127.0.0.1:5959
    // (see `DiagnosticContext::init`).
    pub bind_rpc: Option<SocketAddr>,
    // Data directory; defaults to `/embassy-data` (see `datadir()`).
    pub datadir: Option<PathBuf>,
}
|
|
||||||
impl DiagnosticContextConfig {
|
|
||||||
#[instrument(skip(path))]
|
|
||||||
pub async fn load<P: AsRef<Path>>(path: Option<P>) -> Result<Self, Error> {
|
|
||||||
let cfg_path = path
|
|
||||||
.as_ref()
|
|
||||||
.map(|p| p.as_ref())
|
|
||||||
.unwrap_or(Path::new(crate::util::config::CONFIG_PATH));
|
|
||||||
if let Some(f) = File::maybe_open(cfg_path)
|
|
||||||
.await
|
|
||||||
.with_ctx(|_| (crate::ErrorKind::Filesystem, cfg_path.display().to_string()))?
|
|
||||||
{
|
|
||||||
from_toml_async_reader(f).await
|
|
||||||
} else {
|
|
||||||
Ok(Self::default())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
pub fn datadir(&self) -> &Path {
|
|
||||||
self.datadir
|
|
||||||
.as_deref()
|
|
||||||
.unwrap_or_else(|| Path::new("/embassy-data"))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Shared state behind a `DiagnosticContext` (held in an `Arc`).
pub struct DiagnosticContextSeed {
    // Address the diagnostic RPC server binds to.
    pub bind_rpc: SocketAddr,
    // Resolved data directory.
    pub datadir: PathBuf,
    // Broadcast channel used to signal shutdown.
    pub shutdown: Sender<Option<Shutdown>>,
    // The error that put the system into diagnostic mode.
    pub error: Arc<RpcError>,
    // Disk GUID, if one was detected.
    pub disk_guid: Option<Arc<String>>,
}
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
pub struct DiagnosticContext(Arc<DiagnosticContextSeed>);
|
|
||||||
impl DiagnosticContext {
|
|
||||||
#[instrument(skip(path))]
|
|
||||||
pub async fn init<P: AsRef<Path>>(
|
|
||||||
path: Option<P>,
|
|
||||||
disk_guid: Option<Arc<String>>,
|
|
||||||
error: Error,
|
|
||||||
) -> Result<Self, Error> {
|
|
||||||
let cfg = DiagnosticContextConfig::load(path).await?;
|
|
||||||
|
|
||||||
let (shutdown, _) = tokio::sync::broadcast::channel(1);
|
|
||||||
|
|
||||||
Ok(Self(Arc::new(DiagnosticContextSeed {
|
|
||||||
bind_rpc: cfg.bind_rpc.unwrap_or(([127, 0, 0, 1], 5959).into()),
|
|
||||||
datadir: cfg.datadir().to_owned(),
|
|
||||||
shutdown,
|
|
||||||
disk_guid,
|
|
||||||
error: Arc::new(error.into()),
|
|
||||||
})))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Context for DiagnosticContext {
|
|
||||||
fn host(&self) -> Host<&str> {
|
|
||||||
match self.0.bind_rpc.ip() {
|
|
||||||
IpAddr::V4(a) => Host::Ipv4(a),
|
|
||||||
IpAddr::V6(a) => Host::Ipv6(a),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fn port(&self) -> u16 {
|
|
||||||
self.0.bind_rpc.port()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl Deref for DiagnosticContext {
|
|
||||||
type Target = DiagnosticContextSeed;
|
|
||||||
fn deref(&self) -> &Self::Target {
|
|
||||||
&*self.0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,37 +0,0 @@
|
|||||||
pub mod cli;
|
|
||||||
pub mod diagnostic;
|
|
||||||
pub mod rpc;
|
|
||||||
pub mod sdk;
|
|
||||||
pub mod setup;
|
|
||||||
|
|
||||||
pub use cli::CliContext;
|
|
||||||
pub use diagnostic::DiagnosticContext;
|
|
||||||
pub use rpc::RpcContext;
|
|
||||||
pub use sdk::SdkContext;
|
|
||||||
pub use setup::SetupContext;
|
|
||||||
|
|
||||||
// Unit conversions for each context type.
// NOTE(review): these appear to exist to satisfy a `From<Context>` bound in
// rpc_toolkit's command plumbing — confirm before removing any of them.
impl From<CliContext> for () {
    fn from(_: CliContext) -> Self {
        ()
    }
}
impl From<DiagnosticContext> for () {
    fn from(_: DiagnosticContext) -> Self {
        ()
    }
}
impl From<RpcContext> for () {
    fn from(_: RpcContext) -> Self {
        ()
    }
}
impl From<SdkContext> for () {
    fn from(_: SdkContext) -> Self {
        ()
    }
}
impl From<SetupContext> for () {
    fn from(_: SetupContext) -> Self {
        ()
    }
}
|
|
||||||
@@ -1,482 +0,0 @@
|
|||||||
use std::collections::{BTreeMap, VecDeque};
|
|
||||||
use std::net::{IpAddr, Ipv4Addr, SocketAddr, SocketAddrV4};
|
|
||||||
use std::ops::Deref;
|
|
||||||
use std::path::{Path, PathBuf};
|
|
||||||
use std::sync::atomic::{AtomicBool, Ordering};
|
|
||||||
use std::sync::Arc;
|
|
||||||
use std::time::Duration;
|
|
||||||
|
|
||||||
use bollard::Docker;
|
|
||||||
use helpers::to_tmp_path;
|
|
||||||
use patch_db::json_ptr::JsonPointer;
|
|
||||||
use patch_db::{DbHandle, LockReceipt, LockType, PatchDb, Revision};
|
|
||||||
use reqwest::Url;
|
|
||||||
use rpc_toolkit::url::Host;
|
|
||||||
use rpc_toolkit::Context;
|
|
||||||
use serde::Deserialize;
|
|
||||||
use sqlx::postgres::PgConnectOptions;
|
|
||||||
use sqlx::PgPool;
|
|
||||||
use tokio::fs::File;
|
|
||||||
use tokio::process::Command;
|
|
||||||
use tokio::sync::{broadcast, oneshot, Mutex, RwLock};
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use crate::core::rpc_continuations::{RequestGuid, RestHandler, RpcContinuation};
|
|
||||||
use crate::db::model::{Database, InstalledPackageDataEntry, PackageDataEntry};
|
|
||||||
use crate::init::{init_postgres, pgloader};
|
|
||||||
use crate::install::cleanup::{cleanup_failed, uninstall, CleanupFailedReceipts};
|
|
||||||
use crate::manager::ManagerMap;
|
|
||||||
use crate::middleware::auth::HashSessionToken;
|
|
||||||
use crate::net::tor::os_key;
|
|
||||||
use crate::net::wifi::WpaCli;
|
|
||||||
use crate::net::NetController;
|
|
||||||
use crate::notifications::NotificationManager;
|
|
||||||
use crate::setup::password_hash;
|
|
||||||
use crate::shutdown::Shutdown;
|
|
||||||
use crate::status::{MainStatus, Status};
|
|
||||||
use crate::util::io::from_yaml_async_reader;
|
|
||||||
use crate::util::{AsyncFileExt, Invoke};
|
|
||||||
use crate::{Error, ErrorKind, ResultExt};
|
|
||||||
|
|
||||||
/// Daemon configuration, deserialized from the embassy YAML config file.
/// Every field is optional; defaults are applied at the point of use
/// (see [`RpcContextConfig::datadir`] and [`RpcContext::init`]).
#[derive(Debug, Default, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct RpcContextConfig {
    /// Batch size handed to pgloader during the legacy sqlite -> postgres
    /// secrets migration (default 25000).
    pub migration_batch_rows: Option<usize>,
    /// Prefetch row count handed to pgloader (default 100_000).
    pub migration_prefetch_rows: Option<usize>,
    /// JSON-RPC listen address (default 127.0.0.1:5959).
    pub bind_rpc: Option<SocketAddr>,
    /// Websocket listen address (default 127.0.0.1:5960).
    pub bind_ws: Option<SocketAddr>,
    /// Static file server listen address (default 127.0.0.1:5961).
    pub bind_static: Option<SocketAddr>,
    /// Tor control port (default 127.0.0.1:9051).
    pub tor_control: Option<SocketAddr>,
    /// Tor SOCKS5 proxy address (default 127.0.0.1:9050).
    pub tor_socks: Option<SocketAddr>,
    /// Bind addresses for the embedded DNS server (default 127.0.0.1:53).
    pub dns_bind: Option<Vec<SocketAddr>>,
    /// Max patch-db revisions kept in the in-memory cache (default 512).
    pub revision_cache_size: Option<usize>,
    /// Root data directory (default /embassy-data).
    pub datadir: Option<PathBuf>,
    /// Remote log sink URL.
    /// NOTE(review): not consumed anywhere in this file — verify usage elsewhere.
    pub log_server: Option<Url>,
}
|
|
||||||
impl RpcContextConfig {
    /// Loads the config from `path`, falling back to the compiled-in
    /// `CONFIG_PATH` when `path` is `None`, and to `Self::default()` when the
    /// file does not exist.
    pub async fn load<P: AsRef<Path>>(path: Option<P>) -> Result<Self, Error> {
        let cfg_path = path
            .as_ref()
            .map(|p| p.as_ref())
            .unwrap_or(Path::new(crate::util::config::CONFIG_PATH));
        // `maybe_open` yields Ok(None) for a missing file, so an absent
        // config is not an error — it just means "all defaults".
        if let Some(f) = File::maybe_open(cfg_path)
            .await
            .with_ctx(|_| (crate::ErrorKind::Filesystem, cfg_path.display().to_string()))?
        {
            from_yaml_async_reader(f).await
        } else {
            Ok(Self::default())
        }
    }
    /// Root data directory, defaulting to `/embassy-data`.
    pub fn datadir(&self) -> &Path {
        self.datadir
            .as_deref()
            .unwrap_or_else(|| Path::new("/embassy-data"))
    }
    /// Opens (and, on first run, seeds) the patch-db at
    /// `<datadir>/main/embassy.db`. The initial root document is built from
    /// secrets (tor os key, password hash) read out of `secret_store`.
    pub async fn db(&self, secret_store: &PgPool) -> Result<PatchDb, Error> {
        let db_path = self.datadir().join("main").join("embassy.db");
        let db = PatchDb::open(&db_path)
            .await
            .with_ctx(|_| (crate::ErrorKind::Filesystem, db_path.display().to_string()))?;
        // No value at the root pointer means the db was just created: seed it.
        if !db.exists(&<JsonPointer>::default()).await {
            db.put(
                &<JsonPointer>::default(),
                &Database::init(
                    &os_key(&mut secret_store.acquire().await?).await?,
                    password_hash(&mut secret_store.acquire().await?).await?,
                ),
            )
            .await?;
        }
        Ok(db)
    }
    /// Connects to the postgres `secrets` database (initializing postgres if
    /// needed), runs pending sqlx migrations, and — if the legacy sqlite
    /// secrets db still exists on disk — migrates it in via pgloader.
    #[instrument]
    pub async fn secret_store(&self) -> Result<PgPool, Error> {
        init_postgres(self.datadir()).await?;
        let secret_store =
            PgPool::connect_with(PgConnectOptions::new().database("secrets").username("root"))
                .await?;
        sqlx::migrate!()
            .run(&secret_store)
            .await
            .with_kind(crate::ErrorKind::Database)?;
        // Presence of the old sqlite db is the marker that a v2 migration is
        // still pending.
        let old_db_path = self.datadir().join("main/secrets.db");
        if tokio::fs::metadata(&old_db_path).await.is_ok() {
            pgloader(
                &old_db_path,
                self.migration_batch_rows.unwrap_or(25000),
                self.migration_prefetch_rows.unwrap_or(100_000),
            )
            .await?;
        }
        Ok(secret_store)
    }
}
|
|
||||||
|
|
||||||
/// The daemon's long-lived shared state, held behind `Arc` by [`RpcContext`].
pub struct RpcContextSeed {
    // Set by `RpcContext::shutdown`; checked in unstable builds (see the
    // `Deref`/`Drop` impls) to catch use-after-shutdown.
    is_closed: AtomicBool,
    /// JSON-RPC listen address.
    pub bind_rpc: SocketAddr,
    /// Websocket listen address.
    pub bind_ws: SocketAddr,
    /// Static file server listen address.
    pub bind_static: SocketAddr,
    /// Root data directory.
    pub datadir: PathBuf,
    /// Identifier of the boot data disk (supplied to `RpcContext::init`).
    pub disk_guid: Arc<String>,
    /// Patch-db holding the main database document.
    pub db: PatchDb,
    /// Postgres pool for the secrets database.
    pub secret_store: PgPool,
    /// Docker client (unix socket; 600s timeout — see `RpcContext::init`).
    pub docker: Docker,
    /// Tor / LAN / DNS networking controller.
    pub net_controller: NetController,
    /// Per-package service managers.
    pub managers: ManagerMap,
    /// Capacity bound for `revision_cache`.
    pub revision_cache_size: usize,
    /// Recently applied patch-db revisions.
    pub revision_cache: RwLock<VecDeque<Arc<Revision>>>,
    /// Most recently sampled system metrics, if any have been taken yet.
    pub metrics_cache: RwLock<Option<crate::system::Metrics>>,
    /// Broadcast channel used to request daemon shutdown (None = plain exit).
    pub shutdown: broadcast::Sender<Option<Shutdown>>,
    /// Tor SOCKS5 proxy address.
    pub tor_socks: SocketAddr,
    /// Dispatches user-facing notifications.
    pub notification_manager: NotificationManager,
    /// Kill-switch senders for websockets tied to an auth session, keyed by
    /// the session token — firing one closes the associated socket.
    pub open_authed_websockets: Mutex<BTreeMap<HashSessionToken, Vec<oneshot::Sender<()>>>>,
    /// Pending RPC continuations (rest / websocket), keyed by request guid.
    pub rpc_stream_continuations: Mutex<BTreeMap<RequestGuid, RpcContinuation>>,
    /// wpa_supplicant CLI wrapper for wifi management (initialized on wlan0).
    pub wifi_manager: Arc<RwLock<WpaCli>>,
}
|
|
||||||
|
|
||||||
/// Lock receipts used by [`RpcContext::cleanup`].
pub struct RpcCleanReceipts {
    /// Locks required by `cleanup_failed`.
    cleanup_receipts: CleanupFailedReceipts,
    /// Write lock on the whole `package-data` map.
    packages: LockReceipt<crate::db::model::AllPackageData, ()>,
    /// Write lock on individual `package-data` entries, keyed by package id.
    package: LockReceipt<crate::db::model::PackageDataEntry, String>,
}
|
|
||||||
|
|
||||||
impl RpcCleanReceipts {
    /// Acquires all locks required by [`RpcContext::cleanup`] in one
    /// `lock_all` call and returns the verified receipts.
    pub async fn new<'a>(db: &'a mut impl DbHandle) -> Result<Self, Error> {
        let mut locks = Vec::new();

        let setup = Self::setup(&mut locks);
        Ok(setup(&db.lock_all(locks).await?)?)
    }

    /// Registers the required lock targets in `locks` and returns a closure
    /// that converts the acquired lock set into receipts. Separating setup
    /// from acquisition lets callers batch these targets with others in a
    /// single atomic `lock_all`.
    pub fn setup(
        locks: &mut Vec<patch_db::LockTargetId>,
    ) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
        let cleanup_receipts = CleanupFailedReceipts::setup(locks);

        // Write lock on the full package-data map...
        let packages = crate::db::DatabaseModel::new()
            .package_data()
            .make_locker(LockType::Write)
            .add_to_keys(locks);
        // ...and on every entry within it (`star`), keyed by package id.
        let package = crate::db::DatabaseModel::new()
            .package_data()
            .star()
            .make_locker(LockType::Write)
            .add_to_keys(locks);
        move |skeleton_key| {
            Ok(Self {
                cleanup_receipts: cleanup_receipts(skeleton_key)?,
                packages: packages.verify(skeleton_key)?,
                package: package.verify(skeleton_key)?,
            })
        }
    }
}
|
|
||||||
|
|
||||||
/// Lock receipts used by [`RpcContext::set_nginx_conf`].
pub struct RpcSetNginxReceipts {
    /// Read lock on `server-info`, the source of the LAN/tor hostnames
    /// substituted into the nginx template.
    server_info: LockReceipt<crate::db::model::ServerInfo, ()>,
}
|
|
||||||
|
|
||||||
impl RpcSetNginxReceipts {
|
|
||||||
pub async fn new(db: &'_ mut impl DbHandle) -> Result<Self, Error> {
|
|
||||||
let mut locks = Vec::new();
|
|
||||||
|
|
||||||
let setup = Self::setup(&mut locks);
|
|
||||||
Ok(setup(&db.lock_all(locks).await?)?)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn setup(
|
|
||||||
locks: &mut Vec<patch_db::LockTargetId>,
|
|
||||||
) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
|
|
||||||
let server_info = crate::db::DatabaseModel::new()
|
|
||||||
.server_info()
|
|
||||||
.make_locker(LockType::Read)
|
|
||||||
.add_to_keys(locks);
|
|
||||||
move |skeleton_key| {
|
|
||||||
Ok(Self {
|
|
||||||
server_info: server_info.verify(skeleton_key)?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Cheaply cloneable handle to the daemon's runtime state (an `Arc` wrapper
/// around [`RpcContextSeed`]; see the `Deref` impl).
#[derive(Clone)]
pub struct RpcContext(Arc<RpcContextSeed>);
|
|
||||||
impl RpcContext {
    /// Boots the daemon context: loads config, connects to postgres, opens
    /// the patch-db, connects to docker, starts the net controller, then
    /// repairs transient package states and initializes package managers.
    #[instrument(skip(cfg_path))]
    pub async fn init<P: AsRef<Path>>(
        cfg_path: Option<P>,
        disk_guid: Arc<String>,
    ) -> Result<Self, Error> {
        let base = RpcContextConfig::load(cfg_path).await?;
        tracing::info!("Loaded Config");
        // Tor SOCKS proxy defaults to the standard local port 9050.
        let tor_proxy = base.tor_socks.unwrap_or(SocketAddr::V4(SocketAddrV4::new(
            Ipv4Addr::new(127, 0, 0, 1),
            9050,
        )));
        let (shutdown, _) = tokio::sync::broadcast::channel(1);
        let secret_store = base.secret_store().await?;
        tracing::info!("Opened Pg DB");
        let db = base.db(&secret_store).await?;
        tracing::info!("Opened PatchDB");
        let mut docker = Docker::connect_with_unix_defaults()?;
        // Generous timeout: docker operations (pulls, exports) can be slow.
        docker.set_timeout(Duration::from_secs(600));
        tracing::info!("Connected to Docker");
        let net_controller = NetController::init(
            ([127, 0, 0, 1], 80).into(),
            crate::net::tor::os_key(&mut secret_store.acquire().await?).await?,
            base.tor_control
                .unwrap_or(SocketAddr::from(([127, 0, 0, 1], 9051))),
            base.dns_bind
                .as_ref()
                .map(|v| v.as_slice())
                .unwrap_or(&[SocketAddr::from(([127, 0, 0, 1], 53))]),
            secret_store.clone(),
            None,
            &mut db.handle(),
        )
        .await?;
        tracing::info!("Initialized Net Controller");
        let managers = ManagerMap::default();
        let metrics_cache = RwLock::new(None);
        let notification_manager = NotificationManager::new(secret_store.clone());
        tracing::info!("Initialized Notification Manager");
        let seed = Arc::new(RpcContextSeed {
            is_closed: AtomicBool::new(false),
            bind_rpc: base.bind_rpc.unwrap_or(([127, 0, 0, 1], 5959).into()),
            bind_ws: base.bind_ws.unwrap_or(([127, 0, 0, 1], 5960).into()),
            bind_static: base.bind_static.unwrap_or(([127, 0, 0, 1], 5961).into()),
            datadir: base.datadir().to_path_buf(),
            disk_guid,
            db,
            secret_store,
            docker,
            net_controller,
            managers,
            revision_cache_size: base.revision_cache_size.unwrap_or(512),
            revision_cache: RwLock::new(VecDeque::new()),
            metrics_cache,
            shutdown,
            tor_socks: tor_proxy,
            notification_manager,
            open_authed_websockets: Mutex::new(BTreeMap::new()),
            rpc_stream_continuations: Mutex::new(BTreeMap::new()),
            wifi_manager: Arc::new(RwLock::new(WpaCli::init("wlan0".to_string()))),
        });

        let res = Self(seed);
        // Repair anything left over from an interrupted install/update/remove
        // before managers come up.
        res.cleanup().await?;
        tracing::info!("Cleaned up transient states");
        res.managers
            .init(
                &res,
                &mut res.db.handle(),
                &mut res.secret_store.acquire().await?,
            )
            .await?;
        tracing::info!("Initialized Package Managers");
        Ok(res)
    }

    /// Regenerates the nginx main-ui site config from the LAN/tor addresses
    /// in `server-info`, then reloads nginx via systemctl.
    ///
    /// NOTE(review): `host_str().unwrap()` panics if either stored address
    /// has no host component — presumably guaranteed upstream; confirm.
    #[instrument(skip(self, db, receipts))]
    pub async fn set_nginx_conf<Db: DbHandle>(
        &self,
        db: &mut Db,
        receipts: RpcSetNginxReceipts,
    ) -> Result<(), Error> {
        tokio::fs::write("/etc/nginx/sites-available/default", {
            let info = receipts.server_info.get(db).await?;
            format!(
                include_str!("../nginx/main-ui.conf.template"),
                lan_hostname = info.lan_address.host_str().unwrap(),
                tor_hostname = info.tor_address.host_str().unwrap(),
            )
        })
        .await
        .with_ctx(|_| {
            (
                crate::ErrorKind::Filesystem,
                "/etc/nginx/sites-available/default",
            )
        })?;
        Command::new("systemctl")
            .arg("reload")
            .arg("nginx")
            .invoke(crate::ErrorKind::Nginx)
            .await?;
        Ok(())
    }
    /// Tears down the package managers and the postgres pool, then marks the
    /// context closed (checked by `Deref`/`Drop` in unstable builds).
    #[instrument(skip(self))]
    pub async fn shutdown(self) -> Result<(), Error> {
        self.managers.empty().await?;
        self.secret_store.close().await;
        self.is_closed.store(true, Ordering::SeqCst);
        Ok(())
    }

    /// Repairs per-package state after an unclean shutdown:
    /// - half-finished installs/restores/updates are rolled back via
    ///   `cleanup_failed`;
    /// - in-progress removals are completed via `uninstall`;
    /// - installed packages have leftover tmp volume dirs deleted and their
    ///   main status normalized (Running / in-flight BackingUp -> Starting,
    ///   idle BackingUp -> Stopped).
    /// A failure for one package is logged and does not abort the loop.
    #[instrument(skip(self))]
    pub async fn cleanup(&self) -> Result<(), Error> {
        let mut db = self.db.handle();
        let receipts = RpcCleanReceipts::new(&mut db).await?;
        for (package_id, package) in receipts.packages.get(&mut db).await?.0 {
            // Wrap per-package work in an async block so one failure can be
            // logged without short-circuiting the whole cleanup.
            if let Err(e) = async {
                match package {
                    PackageDataEntry::Installing { .. }
                    | PackageDataEntry::Restoring { .. }
                    | PackageDataEntry::Updating { .. } => {
                        cleanup_failed(self, &mut db, &package_id, &receipts.cleanup_receipts)
                            .await?;
                    }
                    PackageDataEntry::Removing { .. } => {
                        uninstall(
                            self,
                            &mut db,
                            &mut self.secret_store.acquire().await?,
                            &package_id,
                        )
                        .await?;
                    }
                    PackageDataEntry::Installed {
                        installed,
                        static_files,
                        manifest,
                    } => {
                        // Remove stale tmp copies of every declared volume.
                        for (volume_id, volume_info) in &*manifest.volumes {
                            let tmp_path = to_tmp_path(volume_info.path_for(
                                &self.datadir,
                                &package_id,
                                &manifest.version,
                                &volume_id,
                            ))
                            .with_kind(ErrorKind::Filesystem)?;
                            if tokio::fs::metadata(&tmp_path).await.is_ok() {
                                tokio::fs::remove_dir_all(&tmp_path).await?;
                            }
                        }
                        let status = installed.status;
                        // Normalize statuses that imply a process was alive
                        // before the restart.
                        let main = match status.main {
                            MainStatus::BackingUp { started, .. } => {
                                if let Some(_) = started {
                                    MainStatus::Starting { restarting: false }
                                } else {
                                    MainStatus::Stopped
                                }
                            }
                            MainStatus::Running { .. } => {
                                MainStatus::Starting { restarting: false }
                            }
                            a => a.clone(),
                        };
                        let new_package = PackageDataEntry::Installed {
                            installed: InstalledPackageDataEntry {
                                status: Status { main, ..status },
                                ..installed
                            },
                            static_files,
                            manifest,
                        };
                        receipts
                            .package
                            .set(&mut db, new_package, &package_id)
                            .await?;
                    }
                }
                Ok::<_, Error>(())
            }
            .await
            {
                tracing::error!("Failed to clean up package {}: {}", package_id, e);
                tracing::debug!("{:?}", e);
            }
        }
        Ok(())
    }

    /// Drops every continuation whose deadline has passed.
    #[instrument(skip(self))]
    pub async fn clean_continuations(&self) {
        let mut continuations = self.rpc_stream_continuations.lock().await;
        let mut to_remove = Vec::new();
        for (guid, cont) in &*continuations {
            if cont.is_timed_out() {
                to_remove.push(guid.clone());
            }
        }
        for guid in to_remove {
            continuations.remove(&guid);
        }
    }

    /// Registers a continuation under `guid`, evicting timed-out entries
    /// first so the map cannot grow unboundedly.
    #[instrument(skip(self, handler))]
    pub async fn add_continuation(&self, guid: RequestGuid, handler: RpcContinuation) {
        self.clean_continuations().await;
        self.rpc_stream_continuations
            .lock()
            .await
            .insert(guid, handler);
    }

    /// Removes and returns the handler for `guid`, if one is registered.
    /// Continuations are single-use: a successful lookup consumes the entry.
    pub async fn get_continuation_handler(&self, guid: &RequestGuid) -> Option<RestHandler> {
        let mut continuations = self.rpc_stream_continuations.lock().await;
        if let Some(cont) = continuations.remove(guid) {
            cont.into_handler().await
        } else {
            None
        }
    }

    /// Like [`Self::get_continuation_handler`], but only consumes the entry
    /// when it is a websocket continuation.
    pub async fn get_ws_continuation_handler(&self, guid: &RequestGuid) -> Option<RestHandler> {
        let continuations = self.rpc_stream_continuations.lock().await;
        if matches!(continuations.get(guid), Some(RpcContinuation::WebSocket(_))) {
            // Release the lock before re-acquiring it in the shared helper.
            drop(continuations);
            self.get_continuation_handler(guid).await
        } else {
            None
        }
    }

    /// Like [`Self::get_continuation_handler`], but only consumes the entry
    /// when it is a plain REST continuation.
    pub async fn get_rest_continuation_handler(&self, guid: &RequestGuid) -> Option<RestHandler> {
        let continuations = self.rpc_stream_continuations.lock().await;
        if matches!(continuations.get(guid), Some(RpcContinuation::Rest(_))) {
            drop(continuations);
            self.get_continuation_handler(guid).await
        } else {
            None
        }
    }
}
|
|
||||||
impl Context for RpcContext {
|
|
||||||
fn host(&self) -> Host<&str> {
|
|
||||||
match self.0.bind_rpc.ip() {
|
|
||||||
IpAddr::V4(a) => Host::Ipv4(a),
|
|
||||||
IpAddr::V6(a) => Host::Ipv6(a),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fn port(&self) -> u16 {
|
|
||||||
self.0.bind_rpc.port()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl Deref for RpcContext {
|
|
||||||
type Target = RpcContextSeed;
|
|
||||||
fn deref(&self) -> &Self::Target {
|
|
||||||
#[cfg(feature = "unstable")]
|
|
||||||
if self.0.is_closed.load(Ordering::SeqCst) {
|
|
||||||
panic!(
|
|
||||||
"RpcContext used after shutdown! {}",
|
|
||||||
tracing_error::SpanTrace::capture()
|
|
||||||
);
|
|
||||||
}
|
|
||||||
&*self.0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl Drop for RpcContext {
|
|
||||||
fn drop(&mut self) {
|
|
||||||
#[cfg(feature = "unstable")]
|
|
||||||
if self.0.is_closed.load(Ordering::SeqCst) {
|
|
||||||
tracing::info!(
|
|
||||||
"RpcContext dropped. {} left.",
|
|
||||||
Arc::strong_count(&self.0) - 1
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,76 +0,0 @@
|
|||||||
use std::path::{Path, PathBuf};
|
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use clap::ArgMatches;
|
|
||||||
use color_eyre::eyre::eyre;
|
|
||||||
use rpc_toolkit::Context;
|
|
||||||
use serde::Deserialize;
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use crate::util::config::{load_config_from_paths, local_config_path};
|
|
||||||
use crate::{Error, ResultExt};
|
|
||||||
|
|
||||||
/// Configuration for the `embassy-sdk` CLI, merged from one or more YAML
/// config files (see [`SdkContext::init`]).
#[derive(Debug, Default, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct SdkContextConfig {
    /// Path to the developer signing key; defaults to `developer.key.pem`
    /// next to the effective config file.
    pub developer_key_path: Option<PathBuf>,
}
|
|
||||||
|
|
||||||
/// Resolved, immutable state for the SDK CLI, shared via [`SdkContext`].
#[derive(Debug)]
pub struct SdkContextSeed {
    /// Resolved location of the developer signing key (PKCS#8 PEM).
    pub developer_key_path: PathBuf,
}
|
|
||||||
|
|
||||||
/// Cheaply cloneable handle to the SDK CLI state (`Arc` wrapper around
/// [`SdkContextSeed`]; see the `Deref` impl).
#[derive(Debug, Clone)]
pub struct SdkContext(Arc<SdkContextSeed>);
|
|
||||||
impl SdkContext {
    /// BLOCKING
    ///
    /// Builds the SDK context by merging config from, in precedence order:
    /// `--config` arguments, the user-local config file, and the system
    /// `CONFIG_PATH`.
    #[instrument(skip(matches))]
    pub fn init(matches: &ArgMatches) -> Result<Self, crate::Error> {
        let local_config_path = local_config_path();
        let base: SdkContextConfig = load_config_from_paths(
            matches
                .values_of("config")
                .into_iter()
                .flatten()
                .map(|p| Path::new(p))
                .chain(local_config_path.as_deref().into_iter())
                .chain(std::iter::once(Path::new(crate::util::config::CONFIG_PATH))),
        )?;
        Ok(SdkContext(Arc::new(SdkContextSeed {
            // Default key location: `developer.key.pem` in the directory of
            // the effective config file (local config if present, else the
            // system CONFIG_PATH; "/" if that path has no parent).
            developer_key_path: base.developer_key_path.unwrap_or_else(|| {
                local_config_path
                    .as_deref()
                    .unwrap_or_else(|| Path::new(crate::util::config::CONFIG_PATH))
                    .parent()
                    .unwrap_or(Path::new("/"))
                    .join("developer.key.pem")
            }),
        })))
    }
    /// BLOCKING
    ///
    /// Reads and parses the developer signing key from disk (ed25519,
    /// PKCS#8 PEM). If the stored key lacks its public half, it is derived
    /// from the secret key.
    #[instrument]
    pub fn developer_key(&self) -> Result<ed25519_dalek::Keypair, Error> {
        if !self.developer_key_path.exists() {
            return Err(Error::new(eyre!("Developer Key does not exist! Please run `embassy-sdk init` before running this command."), crate::ErrorKind::Uninitialized));
        }
        let pair = <ed25519::KeypairBytes as ed25519::pkcs8::DecodePrivateKey>::from_pkcs8_pem(
            &std::fs::read_to_string(&self.developer_key_path)?,
        )
        .with_kind(crate::ErrorKind::Pem)?;
        let secret = ed25519_dalek::SecretKey::from_bytes(&pair.secret_key[..])?;
        let public = if let Some(public) = pair.public_key {
            ed25519_dalek::PublicKey::from_bytes(&public[..])?
        } else {
            // Derive the public key from the secret half.
            (&secret).into()
        };
        Ok(ed25519_dalek::Keypair { secret, public })
    }
}
|
|
||||||
impl std::ops::Deref for SdkContext {
|
|
||||||
type Target = SdkContextSeed;
|
|
||||||
fn deref(&self) -> &Self::Target {
|
|
||||||
&*self.0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// The SDK CLI runs no server, so the trait's default host/port suffice.
impl Context for SdkContext {}
|
|
||||||
@@ -1,177 +0,0 @@
|
|||||||
use std::net::{IpAddr, SocketAddr};
|
|
||||||
use std::ops::Deref;
|
|
||||||
use std::path::{Path, PathBuf};
|
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use josekit::jwk::Jwk;
|
|
||||||
use patch_db::json_ptr::JsonPointer;
|
|
||||||
use patch_db::PatchDb;
|
|
||||||
use rpc_toolkit::yajrc::RpcError;
|
|
||||||
use rpc_toolkit::Context;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use sqlx::postgres::PgConnectOptions;
|
|
||||||
use sqlx::PgPool;
|
|
||||||
use tokio::fs::File;
|
|
||||||
use tokio::sync::broadcast::Sender;
|
|
||||||
use tokio::sync::RwLock;
|
|
||||||
use tracing::instrument;
|
|
||||||
use url::Host;
|
|
||||||
|
|
||||||
use crate::db::model::Database;
|
|
||||||
use crate::init::{init_postgres, pgloader};
|
|
||||||
use crate::net::tor::os_key;
|
|
||||||
use crate::setup::{password_hash, RecoveryStatus};
|
|
||||||
use crate::util::io::from_yaml_async_reader;
|
|
||||||
use crate::util::AsyncFileExt;
|
|
||||||
use crate::{Error, ResultExt};
|
|
||||||
|
|
||||||
/// Final outcome of the setup flow, returned to the setup UI.
#[derive(Clone, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct SetupResult {
    /// Tor (.onion) address of the new server.
    pub tor_address: String,
    /// LAN address of the new server.
    pub lan_address: String,
    /// Root certificate the client should trust for the LAN address.
    pub root_ca: String,
}
|
|
||||||
|
|
||||||
/// Configuration for the setup daemon, deserialized from the embassy YAML
/// config file. All fields are optional; defaults are applied in
/// [`SetupContext::init`] and [`SetupContextConfig::datadir`].
#[derive(Debug, Default, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct SetupContextConfig {
    /// Batch size handed to pgloader during migration (default 25000).
    pub migration_batch_rows: Option<usize>,
    /// Prefetch row count handed to pgloader (default 100_000).
    pub migration_prefetch_rows: Option<usize>,
    /// RPC listen address (default 127.0.0.1:5959).
    pub bind_rpc: Option<SocketAddr>,
    /// Root data directory (default /embassy-data).
    pub datadir: Option<PathBuf>,
}
|
|
||||||
impl SetupContextConfig {
|
|
||||||
#[instrument(skip(path))]
|
|
||||||
pub async fn load<P: AsRef<Path>>(path: Option<P>) -> Result<Self, Error> {
|
|
||||||
let cfg_path = path
|
|
||||||
.as_ref()
|
|
||||||
.map(|p| p.as_ref())
|
|
||||||
.unwrap_or(Path::new(crate::util::config::CONFIG_PATH));
|
|
||||||
if let Some(f) = File::maybe_open(cfg_path)
|
|
||||||
.await
|
|
||||||
.with_ctx(|_| (crate::ErrorKind::Filesystem, cfg_path.display().to_string()))?
|
|
||||||
{
|
|
||||||
from_yaml_async_reader(f).await
|
|
||||||
} else {
|
|
||||||
Ok(Self::default())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
pub fn datadir(&self) -> &Path {
|
|
||||||
self.datadir
|
|
||||||
.as_deref()
|
|
||||||
.unwrap_or_else(|| Path::new("/embassy-data"))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Long-lived state for the setup daemon, shared behind `Arc` by
/// [`SetupContext`].
pub struct SetupContextSeed {
    /// Config file path the context was loaded from, if one was given.
    pub config_path: Option<PathBuf>,
    /// Batch size for pgloader during the secrets migration.
    pub migration_batch_rows: usize,
    /// Prefetch row count for pgloader during the secrets migration.
    pub migration_prefetch_rows: usize,
    /// RPC listen address for the setup server.
    pub bind_rpc: SocketAddr,
    /// Broadcast used to signal setup-daemon shutdown.
    pub shutdown: Sender<()>,
    /// Root data directory.
    pub datadir: PathBuf,
    /// Ephemeral EC key (P-256 JWK, generated at startup) used to encrypt
    /// the password chosen during setup so it is not observable in transit.
    /// NOTE(review): wording reconstructed from a garbled original comment —
    /// confirm against the setup password flow.
    pub current_secret: Arc<Jwk>,
    /// Drive selected for recovery from a v2 install, if any.
    pub selected_v2_drive: RwLock<Option<PathBuf>>,
    /// Cached product key, once read.
    pub cached_product_key: RwLock<Option<Arc<String>>>,
    /// Progress/outcome of an in-flight recovery, if one was started.
    pub recovery_status: RwLock<Option<Result<RecoveryStatus, RpcError>>>,
    /// Completed setup outcome, keyed alongside its guid/token string.
    /// NOTE(review): the meaning of the `Arc<String>` half is not visible in
    /// this file — verify against the setup completion handler.
    pub setup_result: RwLock<Option<(Arc<String>, SetupResult)>>,
}
|
|
||||||
|
|
||||||
impl AsRef<Jwk> for SetupContextSeed {
    /// Exposes the ephemeral encryption key.
    fn as_ref(&self) -> &Jwk {
        self.current_secret.as_ref()
    }
}
|
|
||||||
|
|
||||||
/// Cheaply cloneable handle to the setup daemon's state (`Arc` wrapper
/// around [`SetupContextSeed`]; see the `Deref` impl).
#[derive(Clone)]
pub struct SetupContext(Arc<SetupContextSeed>);
|
|
||||||
impl SetupContext {
    /// Builds the setup context from the config at `path` (or defaults),
    /// generating a fresh ephemeral P-256 key for password encryption.
    #[instrument(skip(path))]
    pub async fn init<P: AsRef<Path>>(path: Option<P>) -> Result<Self, Error> {
        let cfg = SetupContextConfig::load(path.as_ref()).await?;
        let (shutdown, _) = tokio::sync::broadcast::channel(1);
        let datadir = cfg.datadir().to_owned();
        Ok(Self(Arc::new(SetupContextSeed {
            config_path: path.as_ref().map(|p| p.as_ref().to_owned()),
            migration_batch_rows: cfg.migration_batch_rows.unwrap_or(25000),
            migration_prefetch_rows: cfg.migration_prefetch_rows.unwrap_or(100_000),
            bind_rpc: cfg.bind_rpc.unwrap_or(([127, 0, 0, 1], 5959).into()),
            shutdown,
            datadir,
            current_secret: Arc::new(
                // josekit's error type isn't directly convertible; log it and
                // wrap in a generic crate error.
                Jwk::generate_ec_key(josekit::jwk::alg::ec::EcCurve::P256).map_err(|e| {
                    tracing::debug!("{:?}", e);
                    tracing::error!("Couldn't generate ec key");
                    Error::new(
                        color_eyre::eyre::eyre!("Couldn't generate ec key"),
                        crate::ErrorKind::Unknown,
                    )
                })?,
            ),
            selected_v2_drive: RwLock::new(None),
            cached_product_key: RwLock::new(None),
            recovery_status: RwLock::new(None),
            setup_result: RwLock::new(None),
        })))
    }
    /// Opens (and, on first run, seeds) the patch-db at
    /// `<datadir>/main/embassy.db`; mirrors `RpcContextConfig::db`.
    #[instrument(skip(self))]
    pub async fn db(&self, secret_store: &PgPool) -> Result<PatchDb, Error> {
        let db_path = self.datadir.join("main").join("embassy.db");
        let db = PatchDb::open(&db_path)
            .await
            .with_ctx(|_| (crate::ErrorKind::Filesystem, db_path.display().to_string()))?;
        // An empty root pointer means the db was just created: seed it from
        // the secrets store.
        if !db.exists(&<JsonPointer>::default()).await {
            db.put(
                &<JsonPointer>::default(),
                &Database::init(
                    &os_key(&mut secret_store.acquire().await?).await?,
                    password_hash(&mut secret_store.acquire().await?).await?,
                ),
            )
            .await?;
        }
        Ok(db)
    }
    /// Connects to the postgres `secrets` database, runs sqlx migrations,
    /// and migrates the legacy sqlite secrets db via pgloader if present;
    /// mirrors `RpcContextConfig::secret_store`.
    #[instrument(skip(self))]
    pub async fn secret_store(&self) -> Result<PgPool, Error> {
        init_postgres(&self.datadir).await?;
        let secret_store =
            PgPool::connect_with(PgConnectOptions::new().database("secrets").username("root"))
                .await?;
        sqlx::migrate!()
            .run(&secret_store)
            .await
            .with_kind(crate::ErrorKind::Database)?;
        let old_db_path = self.datadir.join("main/secrets.db");
        if tokio::fs::metadata(&old_db_path).await.is_ok() {
            pgloader(
                &old_db_path,
                self.migration_batch_rows,
                self.migration_prefetch_rows,
            )
            .await?;
        }
        Ok(secret_store)
    }
}
|
|
||||||
|
|
||||||
impl Context for SetupContext {
|
|
||||||
fn host(&self) -> Host<&str> {
|
|
||||||
match self.0.bind_rpc.ip() {
|
|
||||||
IpAddr::V4(a) => Host::Ipv4(a),
|
|
||||||
IpAddr::V6(a) => Host::Ipv6(a),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fn port(&self) -> u16 {
|
|
||||||
self.0.bind_rpc.port()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl Deref for SetupContext {
|
|
||||||
type Target = SetupContextSeed;
|
|
||||||
fn deref(&self) -> &Self::Target {
|
|
||||||
&*self.0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,207 +0,0 @@
|
|||||||
use std::collections::BTreeMap;
|
|
||||||
|
|
||||||
use color_eyre::eyre::eyre;
|
|
||||||
use patch_db::{DbHandle, LockReceipt, LockType};
|
|
||||||
use rpc_toolkit::command;
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use crate::context::RpcContext;
|
|
||||||
use crate::dependencies::{
|
|
||||||
break_all_dependents_transitive, heal_all_dependents_transitive, BreakageRes, DependencyError,
|
|
||||||
DependencyReceipt, TaggedDependencyError,
|
|
||||||
};
|
|
||||||
use crate::s9pk::manifest::PackageId;
|
|
||||||
use crate::status::MainStatus;
|
|
||||||
use crate::util::display_none;
|
|
||||||
use crate::util::serde::display_serializable;
|
|
||||||
use crate::Error;
|
|
||||||
|
|
||||||
/// Lock receipts used by [`start`].
#[derive(Clone)]
pub struct StartReceipts {
    /// Locks needed for dependency healing.
    dependency_receipt: DependencyReceipt,
    /// Write lock on the package's main status.
    status: LockReceipt<MainStatus, ()>,
    /// Read lock on the package's manifest version.
    version: LockReceipt<crate::util::Version, ()>,
}
|
|
||||||
|
|
||||||
impl StartReceipts {
    /// Acquires all locks required by [`start`] for package `id` in one
    /// `lock_all` call and returns the verified receipts.
    pub async fn new(db: &mut impl DbHandle, id: &PackageId) -> Result<Self, Error> {
        let mut locks = Vec::new();

        let setup = Self::setup(&mut locks, id);
        Ok(setup(&db.lock_all(locks).await?)?)
    }

    /// Registers the required lock targets in `locks` and returns a closure
    /// that converts the acquired lock set into receipts. Separating setup
    /// from acquisition lets callers batch these targets with others.
    pub fn setup(
        locks: &mut Vec<patch_db::LockTargetId>,
        id: &PackageId,
    ) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
        let dependency_receipt = DependencyReceipt::setup(locks);
        // Write lock: `start` overwrites the main status.
        let status = crate::db::DatabaseModel::new()
            .package_data()
            .idx_model(id)
            .and_then(|x| x.installed())
            .map(|x| x.status().main())
            .make_locker(LockType::Write)
            .add_to_keys(locks);
        // Read lock: the version is only used to look up the manager.
        let version = crate::db::DatabaseModel::new()
            .package_data()
            .idx_model(id)
            .and_then(|x| x.installed())
            .map(|x| x.manifest().version())
            .make_locker(LockType::Read)
            .add_to_keys(locks);
        move |skeleton_key| {
            Ok(Self {
                dependency_receipt: dependency_receipt(skeleton_key)?,
                status: status.verify(skeleton_key)?,
                version: version.verify(skeleton_key)?,
            })
        }
    }
}
|
|
||||||
|
|
||||||
/// `start` RPC command: marks the package as Starting, heals its dependents,
/// then kicks the package's manager to synchronize the actual process state.
#[command(display(display_none), metadata(sync_db = true))]
#[instrument(skip(ctx))]
pub async fn start(#[context] ctx: RpcContext, #[arg] id: PackageId) -> Result<(), Error> {
    let mut db = ctx.db.handle();
    let mut tx = db.begin().await?;
    let receipts = StartReceipts::new(&mut tx, &id).await?;
    let version = receipts.version.get(&mut tx).await?;
    receipts
        .status
        .set(&mut tx, MainStatus::Starting { restarting: false })
        .await?;
    heal_all_dependents_transitive(&ctx, &mut tx, &id, &receipts.dependency_receipt).await?;

    tx.commit().await?;
    // Release the lock receipts before synchronizing the manager, which may
    // itself need db access.
    drop(receipts);

    ctx.managers
        .get(&(id, version))
        .await
        .ok_or_else(|| Error::new(eyre!("Manager not found"), crate::ErrorKind::InvalidRequest))?
        .synchronize()
        .await;

    Ok(())
}
|
|
||||||
/// Lock receipts used by [`stop_common`].
#[derive(Clone)]
pub struct StopReceipts {
    /// Locks needed to propagate breakage to dependent packages.
    breaks: crate::dependencies::BreakTransitiveReceipts,
    /// Write lock on the package's main status.
    status: LockReceipt<MainStatus, ()>,
}
|
|
||||||
|
|
||||||
impl StopReceipts {
    /// Acquires all locks required to stop package `id` in one `lock_all`
    /// call and returns the verified receipts.
    pub async fn new<'a>(db: &'a mut impl DbHandle, id: &PackageId) -> Result<Self, Error> {
        let mut locks = Vec::new();

        let setup = Self::setup(&mut locks, id);
        Ok(setup(&db.lock_all(locks).await?)?)
    }

    /// Registers the required lock targets in `locks` and returns a closure
    /// that converts the acquired lock set into receipts.
    pub fn setup(
        locks: &mut Vec<patch_db::LockTargetId>,
        id: &PackageId,
    ) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
        let breaks = crate::dependencies::BreakTransitiveReceipts::setup(locks);
        // Write lock: `stop_common` overwrites the main status.
        let status = crate::db::DatabaseModel::new()
            .package_data()
            .idx_model(id)
            .and_then(|x| x.installed())
            .map(|x| x.status().main())
            .make_locker(LockType::Write)
            .add_to_keys(locks);
        move |skeleton_key| {
            Ok(Self {
                breaks: breaks(skeleton_key)?,
                status: status.verify(skeleton_key)?,
            })
        }
    }
}
|
|
||||||
|
|
||||||
/// Shared implementation for `stop` and `stop --dry`: marks the package as
/// Stopping inside a sub-transaction, then records/propagates
/// `NotRunning` breakage to all transitive dependents into `breakages`.
///
/// NOTE(review): the status change is made in a sub-transaction that is
/// `save`d before breakage propagation runs against the outer `db` handle —
/// confirm this ordering is intentional w.r.t. dry-run rollback in
/// `stop_dry` (which aborts the *outer* transaction).
#[instrument(skip(db))]
async fn stop_common<Db: DbHandle>(
    db: &mut Db,
    id: &PackageId,
    breakages: &mut BTreeMap<PackageId, TaggedDependencyError>,
) -> Result<(), Error> {
    let mut tx = db.begin().await?;
    let receipts = StopReceipts::new(&mut tx, id).await?;
    receipts.status.set(&mut tx, MainStatus::Stopping).await?;

    tx.save().await?;
    break_all_dependents_transitive(
        db,
        id,
        DependencyError::NotRunning,
        breakages,
        &receipts.breaks,
    )
    .await?;

    Ok(())
}
|
|
||||||
|
|
||||||
/// `stop` parent command: passes the package id through to the default
/// implementation (`stop_impl`) or to the `dry` subcommand (`stop_dry`).
#[command(
    subcommands(self(stop_impl(async)), stop_dry),
    display(display_none),
    metadata(sync_db = true)
)]
pub fn stop(#[arg] id: PackageId) -> Result<PackageId, Error> {
    Ok(id)
}
|
|
||||||
|
|
||||||
/// `stop dry` subcommand: computes the dependency breakage stopping the
/// package would cause, then rolls the transaction back so nothing changes.
#[command(rename = "dry", display(display_serializable))]
#[instrument(skip(ctx))]
pub async fn stop_dry(
    #[context] ctx: RpcContext,
    #[parent_data] id: PackageId,
) -> Result<BreakageRes, Error> {
    let mut db = ctx.db.handle();
    let mut tx = db.begin().await?;

    let mut breakages = BTreeMap::new();
    stop_common(&mut tx, &id, &mut breakages).await?;

    // Dry run: discard all changes, keep only the computed breakages.
    tx.abort().await?;

    Ok(BreakageRes(breakages))
}
|
|
||||||
|
|
||||||
/// Actually stops the package: runs `stop_common` and commits the result.
#[instrument(skip(ctx))]
pub async fn stop_impl(ctx: RpcContext, id: PackageId) -> Result<(), Error> {
    let mut db = ctx.db.handle();
    let mut tx = db.begin().await?;

    // Breakage info is discarded here; use `stop_dry` to inspect it.
    stop_common(&mut tx, &id, &mut BTreeMap::new()).await?;

    tx.commit().await?;

    Ok(())
}
|
|
||||||
|
|
||||||
/// Restarts a running package by setting its main status to `Restarting`.
///
/// Errors with `InvalidRequest` if the package is not currently running.
#[command(display(display_none), metadata(sync_db = true))]
pub async fn restart(#[context] ctx: RpcContext, #[arg] id: PackageId) -> Result<(), Error> {
    let mut db = ctx.db.handle();
    let mut tx = db.begin().await?;

    // Mutable handle to the package's main status; None if not installed.
    let mut status = crate::db::DatabaseModel::new()
        .package_data()
        .idx_model(&id)
        .and_then(|pde| pde.installed())
        .map(|i| i.status().main())
        .get_mut(&mut tx)
        .await?;
    if !matches!(&*status, Some(MainStatus::Running { .. })) {
        return Err(Error::new(
            eyre!("{} is not running", id),
            crate::ErrorKind::InvalidRequest,
        ));
    }
    *status = Some(MainStatus::Restarting);
    status.save(&mut tx).await?;
    tx.commit().await?;

    Ok(())
}
|
|
||||||
@@ -1 +0,0 @@
|
|||||||
pub mod rpc_continuations;
|
|
||||||
@@ -1,116 +0,0 @@
|
|||||||
use std::sync::Arc;
|
|
||||||
use std::time::Duration;
|
|
||||||
|
|
||||||
use futures::future::BoxFuture;
|
|
||||||
use futures::FutureExt;
|
|
||||||
use helpers::TimedResource;
|
|
||||||
use hyper::upgrade::Upgraded;
|
|
||||||
use hyper::{Body, Error as HyperError, Request, Response};
|
|
||||||
use rand::RngCore;
|
|
||||||
use tokio::task::JoinError;
|
|
||||||
use tokio_tungstenite::WebSocketStream;
|
|
||||||
|
|
||||||
use crate::{Error, ResultExt};
|
|
||||||
|
|
||||||
/// Opaque identifier for a pending RPC continuation (64 unpadded base32
/// characters when generated by `new`). Cheap to clone: the backing string
/// lives in an `Arc`.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, serde::Serialize, serde::Deserialize)]
pub struct RequestGuid<T: AsRef<str> = String>(Arc<T>);
|
|
||||||
impl RequestGuid {
|
|
||||||
pub fn new() -> Self {
|
|
||||||
let mut buf = [0; 40];
|
|
||||||
rand::thread_rng().fill_bytes(&mut buf);
|
|
||||||
RequestGuid(Arc::new(base32::encode(
|
|
||||||
base32::Alphabet::RFC4648 { padding: false },
|
|
||||||
&buf,
|
|
||||||
)))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn from(r: &str) -> Option<RequestGuid> {
|
|
||||||
if r.len() != 64 {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
for c in r.chars() {
|
|
||||||
if !(c >= 'A' && c <= 'Z' || c >= '2' && c <= '7') {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Some(RequestGuid(Arc::new(r.to_owned())))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
/// Smoke test: a freshly generated guid should round-trip through `from`
/// (printed as `Some(...)`).
#[test]
fn parse_guid() {
    println!(
        "{:?}",
        RequestGuid::from(&format!("{}", RequestGuid::new()))
    )
}
|
|
||||||
|
|
||||||
impl<T: AsRef<str>> std::fmt::Display for RequestGuid<T> {
|
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
|
||||||
(&*self.0).as_ref().fmt(f)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub type RestHandler = Box<
|
|
||||||
dyn FnOnce(Request<Body>) -> BoxFuture<'static, Result<Response<Body>, crate::Error>> + Send,
|
|
||||||
>;
|
|
||||||
|
|
||||||
pub type WebSocketHandler = Box<
|
|
||||||
dyn FnOnce(
|
|
||||||
BoxFuture<'static, Result<Result<WebSocketStream<Upgraded>, HyperError>, JoinError>>,
|
|
||||||
) -> BoxFuture<'static, Result<(), Error>>
|
|
||||||
+ Send,
|
|
||||||
>;
|
|
||||||
|
|
||||||
/// A pending HTTP continuation registered by an RPC call, held behind a
/// timeout so abandoned continuations can be reaped.
pub enum RpcContinuation {
    // Plain REST request handler.
    Rest(TimedResource<RestHandler>),
    // WebSocket upgrade handler.
    WebSocket(TimedResource<WebSocketHandler>),
}
|
|
||||||
impl RpcContinuation {
    /// Wraps a REST handler with an expiry deadline.
    pub fn rest(handler: RestHandler, timeout: Duration) -> Self {
        RpcContinuation::Rest(TimedResource::new(handler, timeout))
    }
    /// Wraps a WebSocket handler with an expiry deadline.
    pub fn ws(handler: WebSocketHandler, timeout: Duration) -> Self {
        RpcContinuation::WebSocket(TimedResource::new(handler, timeout))
    }
    /// True once the deadline has passed and this continuation can be reaped.
    pub fn is_timed_out(&self) -> bool {
        match self {
            RpcContinuation::Rest(a) => a.is_timed_out(),
            RpcContinuation::WebSocket(a) => a.is_timed_out(),
        }
    }
    /// Consumes the continuation, yielding a uniform REST handler.
    ///
    /// A WebSocket continuation is adapted on the fly: the returned closure
    /// performs the HTTP upgrade handshake, spawns the WebSocket handler as a
    /// background task, and immediately returns the upgrade response.
    /// Returns `None` if the underlying resource already timed out.
    pub async fn into_handler(self) -> Option<RestHandler> {
        match self {
            RpcContinuation::Rest(handler) => handler.get().await,
            RpcContinuation::WebSocket(handler) => {
                if let Some(handler) = handler.get().await {
                    Some(Box::new(
                        |req: Request<Body>| -> BoxFuture<'static, Result<Response<Body>, Error>> {
                            async move {
                                let (parts, body) = req.into_parts();
                                let req = Request::from_parts(parts, body);
                                // `res` is the upgrade response to send back;
                                // `ws_fut` resolves to the upgraded stream.
                                let (res, ws_fut) = hyper_ws_listener::create_ws(req)
                                    .with_kind(crate::ErrorKind::Network)?;
                                if let Some(ws_fut) = ws_fut {
                                    // Drive the socket in the background; errors
                                    // are only logged, since the HTTP response
                                    // has already been handed back by then.
                                    tokio::task::spawn(async move {
                                        match handler(ws_fut.boxed()).await {
                                            Ok(()) => (),
                                            Err(e) => {
                                                tracing::error!("WebSocket Closed: {}", e);
                                                tracing::debug!("{:?}", e);
                                            }
                                        }
                                    });
                                }

                                Ok(res)
                            }
                            .boxed()
                        },
                    ))
                } else {
                    None
                }
            }
        }
    }
}
|
|
||||||
@@ -1,223 +0,0 @@
|
|||||||
pub mod model;
|
|
||||||
pub mod package;
|
|
||||||
|
|
||||||
use std::future::Future;
|
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use futures::{FutureExt, SinkExt, StreamExt};
|
|
||||||
use patch_db::json_ptr::JsonPointer;
|
|
||||||
use patch_db::{Dump, Revision};
|
|
||||||
use rpc_toolkit::command;
|
|
||||||
use rpc_toolkit::hyper::upgrade::Upgraded;
|
|
||||||
use rpc_toolkit::hyper::{Body, Error as HyperError, Request, Response};
|
|
||||||
use rpc_toolkit::yajrc::RpcError;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use serde_json::Value;
|
|
||||||
use tokio::sync::oneshot;
|
|
||||||
use tokio::task::JoinError;
|
|
||||||
use tokio_tungstenite::tungstenite::protocol::frame::coding::CloseCode;
|
|
||||||
use tokio_tungstenite::tungstenite::protocol::CloseFrame;
|
|
||||||
use tokio_tungstenite::tungstenite::Message;
|
|
||||||
use tokio_tungstenite::WebSocketStream;
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
pub use self::model::DatabaseModel;
|
|
||||||
use crate::context::RpcContext;
|
|
||||||
use crate::middleware::auth::{HasValidSession, HashSessionToken};
|
|
||||||
use crate::util::serde::{display_serializable, IoFormat};
|
|
||||||
use crate::{Error, ResultExt};
|
|
||||||
|
|
||||||
/// Serves one `/db` subscription WebSocket.
///
/// Sends the full current database dump first, then streams revisions as they
/// occur. Unauthenticated connections are accepted and then immediately
/// closed with an UNAUTHORIZED close frame.
#[instrument(skip(ctx, session, ws_fut))]
async fn ws_handler<
    WSFut: Future<Output = Result<Result<WebSocketStream<Upgraded>, HyperError>, JoinError>>,
>(
    ctx: RpcContext,
    session: Option<(HasValidSession, HashSessionToken)>,
    ws_fut: WSFut,
) -> Result<(), Error> {
    // Dump + subscription are taken together so no revision falling between
    // the snapshot and the first patch can be missed.
    let (dump, sub) = ctx.db.dump_and_sub().await?;
    let mut stream = ws_fut
        .await
        .with_kind(crate::ErrorKind::Network)?
        .with_kind(crate::ErrorKind::Unknown)?;

    if let Some((session, token)) = session {
        // Lets us close this socket if the session is killed elsewhere.
        let kill = subscribe_to_session_kill(&ctx, token).await;
        send_dump(session, &mut stream, dump).await?;

        deal_with_messages(session, kill, sub, stream).await?;
    } else {
        stream
            .close(Some(CloseFrame {
                code: CloseCode::Error,
                reason: "UNAUTHORIZED".into(),
            }))
            .await
            .with_kind(crate::ErrorKind::Network)?;
    }

    Ok(())
}
|
|
||||||
|
|
||||||
async fn subscribe_to_session_kill(
|
|
||||||
ctx: &RpcContext,
|
|
||||||
token: HashSessionToken,
|
|
||||||
) -> oneshot::Receiver<()> {
|
|
||||||
let (send, recv) = oneshot::channel();
|
|
||||||
let mut guard = ctx.open_authed_websockets.lock().await;
|
|
||||||
if !guard.contains_key(&token) {
|
|
||||||
guard.insert(token, vec![send]);
|
|
||||||
} else {
|
|
||||||
guard.get_mut(&token).unwrap().push(send);
|
|
||||||
}
|
|
||||||
recv
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Event loop for an authenticated `/db` WebSocket.
///
/// Runs until either the session is killed (socket is closed with an
/// UNAUTHORIZED frame) or the client hangs up. Each database revision
/// received from `sub` is forwarded to the client as a JSON text frame.
/// The `HasValidSession` argument is a type-level witness that the caller
/// authenticated; it is not otherwise used.
#[instrument(skip(_has_valid_authentication, kill, sub, stream))]
async fn deal_with_messages(
    _has_valid_authentication: HasValidSession,
    mut kill: oneshot::Receiver<()>,
    mut sub: patch_db::Subscriber,
    mut stream: WebSocketStream<Upgraded>,
) -> Result<(), Error> {
    loop {
        futures::select! {
            _ = (&mut kill).fuse() => {
                tracing::info!("Closing WebSocket: Reason: Session Terminated");
                stream
                    .close(Some(CloseFrame {
                        code: CloseCode::Error,
                        reason: "UNAUTHORIZED".into(),
                    }))
                    .await
                    .with_kind(crate::ErrorKind::Network)?;
                return Ok(())
            }
            new_rev = sub.recv().fuse() => {
                // The subscriber only yields None when patch-db is dropped,
                // which cannot happen while the context is alive.
                let rev = new_rev.expect("UNREACHABLE: patch-db is dropped");
                stream
                    .send(Message::Text(serde_json::to_string(&rev).with_kind(crate::ErrorKind::Serialization)?))
                    .await
                    .with_kind(crate::ErrorKind::Network)?;
            }
            message = stream.next().fuse() => {
                let message = message.transpose().with_kind(crate::ErrorKind::Network)?;
                match message {
                    None => {
                        // Client closed the connection.
                        tracing::info!("Closing WebSocket: Stream Finished");
                        return Ok(())
                    }
                    // All inbound frames (pings etc.) are ignored.
                    _ => (),
                }
            }
        }
    }
}
|
|
||||||
|
|
||||||
/// Sends the full database dump as the first frame of a subscription.
/// The `HasValidSession` argument is a type-level witness of authentication.
async fn send_dump(
    _has_valid_authentication: HasValidSession,
    stream: &mut WebSocketStream<Upgraded>,
    dump: Dump,
) -> Result<(), Error> {
    stream
        .send(Message::Text(
            serde_json::to_string(&dump).with_kind(crate::ErrorKind::Serialization)?,
        ))
        .await
        .with_kind(crate::ErrorKind::Network)?;
    Ok(())
}
|
|
||||||
|
|
||||||
/// HTTP entry point for `/db` subscriptions: authenticates the request,
/// performs the WebSocket upgrade, and spawns the stream handler.
///
/// Authentication failures do not reject the upgrade here; the socket is
/// accepted and then closed with an UNAUTHORIZED frame by `ws_handler`.
pub async fn subscribe(ctx: RpcContext, req: Request<Body>) -> Result<Response<Body>, Error> {
    let (parts, body) = req.into_parts();
    let session = match async {
        let token = HashSessionToken::from_request_parts(&parts)?;
        let session = HasValidSession::from_session(&token, &ctx).await?;
        Ok::<_, Error>((session, token))
    }
    .await
    {
        Ok(a) => Some(a),
        Err(e) => {
            // A missing/invalid token is the expected failure mode; only
            // log unexpected error kinds.
            if e.kind != crate::ErrorKind::Authorization {
                tracing::error!("Error Authenticating Websocket: {}", e);
                tracing::debug!("{:?}", e);
            }
            None
        }
    };
    let req = Request::from_parts(parts, body);
    let (res, ws_fut) = hyper_ws_listener::create_ws(req).with_kind(crate::ErrorKind::Network)?;
    if let Some(ws_fut) = ws_fut {
        // Drive the socket in the background; handler errors are only logged.
        tokio::task::spawn(async move {
            match ws_handler(ctx, session, ws_fut).await {
                Ok(()) => (),
                Err(e) => {
                    tracing::error!("WebSocket Closed: {}", e);
                    tracing::debug!("{:?}", e);
                }
            }
        });
    }

    Ok(res)
}
|
|
||||||
|
|
||||||
/// Parent command grouping the `db` RPCs.
#[command(subcommands(revisions, dump, put))]
pub fn db() -> Result<(), RpcError> {
    Ok(())
}
|
|
||||||
|
|
||||||
/// Response for `db revisions`: either the revision list since the requested
/// point, or a full dump when the requested revision can no longer be
/// replayed.
#[derive(Deserialize, Serialize)]
#[serde(untagged)]
pub enum RevisionsRes {
    Revisions(Vec<Arc<Revision>>),
    Dump(Dump),
}
|
|
||||||
|
|
||||||
#[command(display(display_serializable))]
|
|
||||||
pub async fn revisions(
|
|
||||||
#[context] ctx: RpcContext,
|
|
||||||
#[arg] since: u64,
|
|
||||||
#[allow(unused_variables)]
|
|
||||||
#[arg(long = "format")]
|
|
||||||
format: Option<IoFormat>,
|
|
||||||
) -> Result<RevisionsRes, Error> {
|
|
||||||
Ok(match ctx.db.sync(since).await? {
|
|
||||||
Ok(revs) => RevisionsRes::Revisions(revs),
|
|
||||||
Err(dump) => RevisionsRes::Dump(dump),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns a full dump of the current database state.
#[command(display(display_serializable))]
pub async fn dump(
    #[context] ctx: RpcContext,
    #[allow(unused_variables)]
    #[arg(long = "format")]
    format: Option<IoFormat>,
) -> Result<Dump, Error> {
    Ok(ctx.db.dump().await?)
}
|
|
||||||
|
|
||||||
/// Parent command for direct database writes (currently only `ui`).
#[command(subcommands(ui))]
pub fn put() -> Result<(), RpcError> {
    Ok(())
}
|
|
||||||
|
|
||||||
/// Writes `value` at `pointer` within the database's `/ui` subtree.
#[command(display(display_serializable))]
#[instrument(skip(ctx))]
pub async fn ui(
    #[context] ctx: RpcContext,
    #[arg] pointer: JsonPointer,
    #[arg] value: Value,
    #[allow(unused_variables)]
    #[arg(long = "format")]
    format: Option<IoFormat>,
) -> Result<(), Error> {
    // Prefix the caller-supplied pointer with /ui so writes cannot escape the
    // frontend-owned subtree.
    let ptr = "/ui"
        .parse::<JsonPointer>()
        .with_kind(crate::ErrorKind::Database)?
        + &pointer;
    ctx.db.put(&ptr, &value).await?;
    Ok(())
}
|
|
||||||
@@ -1,373 +0,0 @@
|
|||||||
use std::collections::{BTreeMap, BTreeSet};
|
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use chrono::{DateTime, Utc};
|
|
||||||
use emver::VersionRange;
|
|
||||||
use isocountry::CountryCode;
|
|
||||||
use patch_db::json_ptr::JsonPointer;
|
|
||||||
use patch_db::{HasModel, Map, MapModel, OptionModel};
|
|
||||||
use reqwest::Url;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use serde_json::Value;
|
|
||||||
use torut::onion::TorSecretKeyV3;
|
|
||||||
|
|
||||||
use crate::config::spec::{PackagePointerSpec, SystemPointerSpec};
|
|
||||||
use crate::hostname::{generate_hostname, generate_id};
|
|
||||||
use crate::install::progress::InstallProgress;
|
|
||||||
use crate::net::interface::InterfaceId;
|
|
||||||
use crate::s9pk::manifest::{Manifest, ManifestModel, PackageId};
|
|
||||||
use crate::status::health_check::HealthCheckId;
|
|
||||||
use crate::status::Status;
|
|
||||||
use crate::util::Version;
|
|
||||||
use crate::version::{Current, VersionT};
|
|
||||||
|
|
||||||
/// Root schema of the patch-db database.
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct Database {
    #[model]
    // Server-wide state: identity, version, addresses, status.
    pub server_info: ServerInfo,
    #[model]
    // Per-package lifecycle state for every known package.
    pub package_data: AllPackageData,
    #[model]
    // Packages discovered in recovered data that can be re-installed.
    pub recovered_packages: BTreeMap<PackageId, RecoveredPackageInfo>,
    // Free-form state owned by the frontend (seeded from patchdb-ui-seed.json).
    pub ui: Value,
}
|
|
||||||
impl Database {
    /// Builds the initial database contents for a fresh install.
    ///
    /// Generates a new server id and hostname, derives the Tor address from
    /// `tor_key`, and seeds the UI state from the bundled JSON file.
    pub fn init(tor_key: &TorSecretKeyV3, password_hash: String) -> Self {
        let id = generate_id();
        let my_hostname = generate_hostname();
        // Generated hostnames always produce a parseable LAN URL.
        let lan_address = my_hostname.lan_address().parse().unwrap();
        // TODO
        Database {
            server_info: ServerInfo {
                id,
                version: Current::new().semver().into(),
                hostname: Some(my_hostname.0),
                last_backup: None,
                last_wifi_region: None,
                eos_version_compat: Current::new().compat().clone(),
                lan_address,
                // Onion addresses render to valid URLs, so parse can't fail.
                tor_address: format!("http://{}", tor_key.public().get_onion_address())
                    .parse()
                    .unwrap(),
                status_info: ServerStatus {
                    backup_progress: None,
                    updated: false,
                    update_progress: None,
                },
                wifi: WifiInfo {
                    ssids: Vec::new(),
                    connected: None,
                    selected: None,
                },
                unread_notification_count: 0,
                connection_addresses: ConnectionAddresses {
                    tor: Vec::new(),
                    clearnet: Vec::new(),
                },
                password_hash,
            },
            package_data: AllPackageData::default(),
            recovered_packages: BTreeMap::new(),
            // Compile-time-bundled seed is known-valid JSON.
            ui: serde_json::from_str(include_str!("../../../frontend/patchdb-ui-seed.json"))
                .unwrap(),
        }
    }
}
|
|
||||||
impl DatabaseModel {
|
|
||||||
pub fn new() -> Self {
|
|
||||||
Self::from(JsonPointer::default())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Server-wide state stored under `/server-info`.
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct ServerInfo {
    // Unique id generated at first boot (see `Database::init`).
    pub id: String,
    pub hostname: Option<String>,
    // OS version this database was written by.
    pub version: Version,
    pub last_backup: Option<DateTime<Utc>>,
    /// Used in the wifi to determine the region to set the system to
    pub last_wifi_region: Option<CountryCode>,
    // Range of OS versions this install is compatible with.
    pub eos_version_compat: VersionRange,
    pub lan_address: Url,
    pub tor_address: Url,
    #[model]
    #[serde(default)]
    pub status_info: ServerStatus,
    pub wifi: WifiInfo,
    pub unread_notification_count: u64,
    pub connection_addresses: ConnectionAddresses,
    // Hash of the master password.
    // NOTE(review): hash scheme not visible from this file — confirm.
    pub password_hash: String,
}
|
|
||||||
|
|
||||||
/// Per-package completion flag reported while a backup is running.
#[derive(Debug, Default, Deserialize, Serialize, HasModel)]
pub struct BackupProgress {
    pub complete: bool,
}
|
|
||||||
|
|
||||||
/// Transient server status: backup and OS-update progress.
#[derive(Debug, Default, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct ServerStatus {
    #[model]
    // Present only while a backup is in progress; keyed by package.
    pub backup_progress: Option<BTreeMap<PackageId, BackupProgress>>,
    // Whether an OS update has been applied.
    // NOTE(review): inferred from the field name — confirm semantics.
    pub updated: bool,
    #[model]
    pub update_progress: Option<UpdateProgress>,
}
|
|
||||||
|
|
||||||
/// Byte-level progress of an OS update download.
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct UpdateProgress {
    // Total size in bytes, when known up front.
    pub size: Option<u64>,
    pub downloaded: u64,
}
|
|
||||||
|
|
||||||
/// Known wifi networks plus which one is selected/connected.
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct WifiInfo {
    pub ssids: Vec<String>,
    pub selected: Option<String>,
    pub connected: Option<String>,
}
|
|
||||||
|
|
||||||
/// Human-readable hardware description strings.
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct ServerSpecs {
    pub cpu: String,
    pub disk: String,
    pub memory: String,
}
|
|
||||||
|
|
||||||
/// Addresses the server is reachable at, by transport.
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct ConnectionAddresses {
    pub tor: Vec<String>,
    pub clearnet: Vec<String>,
}
|
|
||||||
|
|
||||||
/// Map of every known package to its lifecycle entry (`/package-data`).
#[derive(Debug, Default, Deserialize, Serialize)]
pub struct AllPackageData(pub BTreeMap<PackageId, PackageDataEntry>);
impl Map for AllPackageData {
    type Key = PackageId;
    type Value = PackageDataEntry;
    fn get(&self, key: &Self::Key) -> Option<&Self::Value> {
        self.0.get(key)
    }
}
// Expose the patch-db map model so entries can be addressed by package id.
impl HasModel for AllPackageData {
    type Model = MapModel<Self>;
}
|
|
||||||
|
|
||||||
/// Web-root-relative URL paths of a package's static assets.
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct StaticFiles {
    license: String,
    instructions: String,
    icon: String,
}
impl StaticFiles {
    /// Builds the standard `/public/package-data/<id>/<version>/...` paths
    /// for a locally installed package.
    pub fn local(id: &PackageId, version: &Version, icon_type: &str) -> Self {
        StaticFiles {
            license: format!("/public/package-data/{}/{}/LICENSE.md", id, version),
            instructions: format!("/public/package-data/{}/{}/INSTRUCTIONS.md", id, version),
            icon: format!("/public/package-data/{}/{}/icon.{}", id, version, icon_type),
        }
    }
}
|
|
||||||
|
|
||||||
/// Lifecycle state of a package, serialized with a `state` tag.
///
/// Every state carries the manifest and static file paths; states that still
/// have a usable installation also carry an `InstalledPackageDataEntry`.
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(tag = "state")]
#[serde(rename_all = "kebab-case")]
pub enum PackageDataEntry {
    #[serde(rename_all = "kebab-case")]
    // Fresh install in progress.
    Installing {
        static_files: StaticFiles,
        manifest: Manifest,
        install_progress: Arc<InstallProgress>,
    },
    #[serde(rename_all = "kebab-case")]
    // New version installing; the previous installation is still present.
    Updating {
        static_files: StaticFiles,
        manifest: Manifest,
        installed: InstalledPackageDataEntry,
        install_progress: Arc<InstallProgress>,
    },
    #[serde(rename_all = "kebab-case")]
    // Being restored from a backup.
    Restoring {
        static_files: StaticFiles,
        manifest: Manifest,
        install_progress: Arc<InstallProgress>,
    },
    #[serde(rename_all = "kebab-case")]
    // Uninstall in progress; `removing` holds the entry being torn down.
    Removing {
        static_files: StaticFiles,
        manifest: Manifest,
        removing: InstalledPackageDataEntry,
    },
    #[serde(rename_all = "kebab-case")]
    // Fully installed.
    Installed {
        static_files: StaticFiles,
        manifest: Manifest,
        installed: InstalledPackageDataEntry,
    },
}
|
|
||||||
impl PackageDataEntry {
|
|
||||||
pub fn installed(&self) -> Option<&InstalledPackageDataEntry> {
|
|
||||||
match self {
|
|
||||||
Self::Installing { .. } | Self::Restoring { .. } | Self::Removing { .. } => None,
|
|
||||||
Self::Updating { installed, .. } | Self::Installed { installed, .. } => Some(installed),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
pub fn installed_mut(&mut self) -> Option<&mut InstalledPackageDataEntry> {
|
|
||||||
match self {
|
|
||||||
Self::Installing { .. } | Self::Restoring { .. } | Self::Removing { .. } => None,
|
|
||||||
Self::Updating { installed, .. } | Self::Installed { installed, .. } => Some(installed),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
pub fn into_installed(self) -> Option<InstalledPackageDataEntry> {
|
|
||||||
match self {
|
|
||||||
Self::Installing { .. } | Self::Restoring { .. } | Self::Removing { .. } => None,
|
|
||||||
Self::Updating { installed, .. } | Self::Installed { installed, .. } => Some(installed),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
pub fn manifest(self) -> Manifest {
|
|
||||||
match self {
|
|
||||||
PackageDataEntry::Installing { manifest, .. } => manifest,
|
|
||||||
PackageDataEntry::Updating { manifest, .. } => manifest,
|
|
||||||
PackageDataEntry::Restoring { manifest, .. } => manifest,
|
|
||||||
PackageDataEntry::Removing { manifest, .. } => manifest,
|
|
||||||
PackageDataEntry::Installed { manifest, .. } => manifest,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl PackageDataEntryModel {
    /// Model for the `installed` field (absent in non-installed states).
    pub fn installed(self) -> OptionModel<InstalledPackageDataEntry> {
        self.0.child("installed").into()
    }
    /// Model for the `removing` field (present only while uninstalling).
    pub fn removing(self) -> OptionModel<InstalledPackageDataEntry> {
        self.0.child("removing").into()
    }
    /// Model for the `install-progress` field.
    pub fn install_progress(self) -> OptionModel<InstallProgress> {
        self.0.child("install-progress").into()
    }
    /// Model for the manifest (present in every state).
    pub fn manifest(self) -> ManifestModel {
        self.0.child("manifest").into()
    }
}
|
|
||||||
|
|
||||||
/// Full state of an installed package.
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct InstalledPackageDataEntry {
    #[model]
    pub status: Status,
    pub marketplace_url: Option<Url>,
    #[serde(default)]
    #[serde(with = "crate::util::serde::ed25519_pubkey")]
    // Key of the developer that signed the installed package.
    pub developer_key: ed25519_dalek::PublicKey,
    #[model]
    pub manifest: Manifest,
    pub last_backup: Option<DateTime<Utc>>,
    #[model]
    pub system_pointers: Vec<SystemPointerSpec>,
    #[model]
    // Cached manifest/icon info for each dependency.
    pub dependency_info: BTreeMap<PackageId, StaticDependencyInfo>,
    #[model]
    // Packages that currently depend on this one.
    pub current_dependents: CurrentDependents,
    #[model]
    // Packages this one currently depends on.
    pub current_dependencies: CurrentDependencies,
    #[model]
    pub interface_addresses: InterfaceAddressMap,
}
|
|
||||||
|
|
||||||
/// Reverse dependency edges: packages that depend on this one, with the
/// pointers and health checks they rely on.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct CurrentDependents(pub BTreeMap<PackageId, CurrentDependencyInfo>);
impl CurrentDependents {
    /// Applies `transform` to the whole inner map, builder-style.
    pub fn map(
        mut self,
        transform: impl Fn(
            BTreeMap<PackageId, CurrentDependencyInfo>,
        ) -> BTreeMap<PackageId, CurrentDependencyInfo>,
    ) -> Self {
        self.0 = transform(self.0);
        self
    }
}
impl Map for CurrentDependents {
    type Key = PackageId;
    type Value = CurrentDependencyInfo;
    fn get(&self, key: &Self::Key) -> Option<&Self::Value> {
        self.0.get(key)
    }
}
// Expose the patch-db map model so entries can be addressed by package id.
impl HasModel for CurrentDependents {
    type Model = MapModel<Self>;
}
|
|
||||||
|
|
||||||
/// Forward dependency edges: packages this one depends on, with the pointers
/// and health checks it relies on.
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
pub struct CurrentDependencies(pub BTreeMap<PackageId, CurrentDependencyInfo>);
impl CurrentDependencies {
    /// Applies `transform` to the whole inner map, builder-style.
    pub fn map(
        mut self,
        transform: impl Fn(
            BTreeMap<PackageId, CurrentDependencyInfo>,
        ) -> BTreeMap<PackageId, CurrentDependencyInfo>,
    ) -> Self {
        self.0 = transform(self.0);
        self
    }
}
impl Map for CurrentDependencies {
    type Key = PackageId;
    type Value = CurrentDependencyInfo;
    fn get(&self, key: &Self::Key) -> Option<&Self::Value> {
        self.0.get(key)
    }
}
// Expose the patch-db map model so entries can be addressed by package id.
impl HasModel for CurrentDependencies {
    type Model = MapModel<Self>;
}
|
|
||||||
|
|
||||||
/// Cached display info for a (possibly uninstalled) dependency.
#[derive(Clone, Debug, Default, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct StaticDependencyInfo {
    pub manifest: Option<Manifest>,
    pub icon: String,
}
|
|
||||||
|
|
||||||
/// What one package needs from a dependency: the config pointers it reads
/// and the health checks it requires.
#[derive(Clone, Debug, Default, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct CurrentDependencyInfo {
    pub pointers: Vec<PackagePointerSpec>,
    pub health_checks: BTreeSet<HealthCheckId>,
}
|
|
||||||
|
|
||||||
/// Network addresses assigned to each service interface of a package.
#[derive(Debug, Deserialize, Serialize)]
pub struct InterfaceAddressMap(pub BTreeMap<InterfaceId, InterfaceAddresses>);
impl Map for InterfaceAddressMap {
    type Key = InterfaceId;
    type Value = InterfaceAddresses;
    fn get(&self, key: &Self::Key) -> Option<&Self::Value> {
        self.0.get(key)
    }
}
// Expose the patch-db map model so entries can be addressed by interface id.
impl HasModel for InterfaceAddressMap {
    type Model = MapModel<Self>;
}
|
|
||||||
|
|
||||||
/// Tor and LAN addresses assigned to one service interface.
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct InterfaceAddresses {
    #[model]
    pub tor_address: Option<String>,
    #[model]
    pub lan_address: Option<String>,
}
|
|
||||||
|
|
||||||
/// Display info for a package found in recovered data that can be
/// re-installed.
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct RecoveredPackageInfo {
    pub title: String,
    pub icon: String,
    pub version: Version,
}
|
|
||||||
@@ -1,75 +0,0 @@
|
|||||||
use patch_db::{DbHandle, LockReceipt, LockTargetId, LockType, Verifier};
|
|
||||||
|
|
||||||
use crate::s9pk::manifest::{Manifest, PackageId};
|
|
||||||
use crate::Error;
|
|
||||||
|
|
||||||
/// Lock receipt granting read access to the whole `package-data` map.
pub struct PackageReceipts {
    package_data: LockReceipt<super::model::AllPackageData, ()>,
}
|
|
||||||
|
|
||||||
impl PackageReceipts {
|
|
||||||
pub async fn new<'a>(db: &'a mut impl DbHandle) -> Result<Self, Error> {
|
|
||||||
let mut locks = Vec::new();
|
|
||||||
|
|
||||||
let setup = Self::setup(&mut locks);
|
|
||||||
Ok(setup(&db.lock_all(locks).await?)?)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn setup(locks: &mut Vec<LockTargetId>) -> impl FnOnce(&Verifier) -> Result<Self, Error> {
|
|
||||||
let package_data = crate::db::DatabaseModel::new()
|
|
||||||
.package_data()
|
|
||||||
.make_locker(LockType::Read)
|
|
||||||
.add_to_keys(locks);
|
|
||||||
move |skeleton_key| {
|
|
||||||
Ok(Self {
|
|
||||||
package_data: package_data.verify(&skeleton_key)?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn get_packages<Db: DbHandle>(
|
|
||||||
db: &mut Db,
|
|
||||||
receipts: &PackageReceipts,
|
|
||||||
) -> Result<Vec<PackageId>, Error> {
|
|
||||||
let packages = receipts.package_data.get(db).await?;
|
|
||||||
Ok(packages.0.keys().cloned().collect())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Lock receipt granting read access to every package's manifest, addressed
/// by package id (the `String` key parameter).
pub struct ManifestReceipts {
    manifest: LockReceipt<Manifest, String>,
}
|
|
||||||
|
|
||||||
impl ManifestReceipts {
|
|
||||||
pub async fn new<'a>(db: &'a mut impl DbHandle, id: &PackageId) -> Result<Self, Error> {
|
|
||||||
let mut locks = Vec::new();
|
|
||||||
|
|
||||||
let setup = Self::setup(&mut locks, id);
|
|
||||||
Ok(setup(&db.lock_all(locks).await?)?)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn setup(
|
|
||||||
locks: &mut Vec<LockTargetId>,
|
|
||||||
_id: &PackageId,
|
|
||||||
) -> impl FnOnce(&Verifier) -> Result<Self, Error> {
|
|
||||||
let manifest = crate::db::DatabaseModel::new()
|
|
||||||
.package_data()
|
|
||||||
.star()
|
|
||||||
.manifest()
|
|
||||||
.make_locker(LockType::Read)
|
|
||||||
.add_to_keys(locks);
|
|
||||||
move |skeleton_key| {
|
|
||||||
Ok(Self {
|
|
||||||
manifest: manifest.verify(&skeleton_key)?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Fetches the manifest for `pkg`, or `None` if that package has none.
pub async fn get_manifest<Db: DbHandle>(
    db: &mut Db,
    pkg: &PackageId,
    receipts: &ManifestReceipts,
) -> Result<Option<Manifest>, Error> {
    Ok(receipts.manifest.get(db, pkg).await?)
}
|
|
||||||
@@ -1,45 +0,0 @@
|
|||||||
use std::fs::File;
|
|
||||||
use std::io::Write;
|
|
||||||
use std::path::Path;
|
|
||||||
|
|
||||||
use ed25519::pkcs8::EncodePrivateKey;
|
|
||||||
use ed25519_dalek::Keypair;
|
|
||||||
use rpc_toolkit::command;
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use crate::context::SdkContext;
|
|
||||||
use crate::util::display_none;
|
|
||||||
use crate::{Error, ResultExt};
|
|
||||||
|
|
||||||
/// `sdk init`: generates the developer signing key if it does not exist yet.
///
/// Creates the parent directory as needed, generates an ed25519 keypair, and
/// writes it to `ctx.developer_key_path` in PKCS#8 PEM format. No-op when the
/// key file already exists.
#[command(cli_only, blocking, display(display_none))]
#[instrument(skip(ctx))]
pub fn init(#[context] ctx: SdkContext) -> Result<(), Error> {
    if !ctx.developer_key_path.exists() {
        let parent = ctx.developer_key_path.parent().unwrap_or(Path::new("/"));
        if !parent.exists() {
            std::fs::create_dir_all(parent)
                .with_ctx(|_| (crate::ErrorKind::Filesystem, parent.display().to_string()))?;
        }
        tracing::info!("Generating new developer key...");
        // `rand_old`: presumably the rand version ed25519-dalek's `generate`
        // requires — NOTE(review): confirm against Cargo.toml.
        let keypair = Keypair::generate(&mut rand_old::thread_rng());
        tracing::info!("Writing key to {}", ctx.developer_key_path.display());
        let keypair_bytes = ed25519::KeypairBytes {
            secret_key: keypair.secret.to_bytes(),
            public_key: Some(keypair.public.to_bytes()),
        };
        let mut dev_key_file = File::create(&ctx.developer_key_path)?;
        dev_key_file.write_all(
            keypair_bytes
                .to_pkcs8_pem(base64ct::LineEnding::default())
                .with_kind(crate::ErrorKind::Pem)?
                .as_bytes(),
        )?;
        // Flush to disk so the key survives power loss.
        dev_key_file.sync_all()?;
    }
    Ok(())
}
|
|
||||||
|
|
||||||
/// Parent command for verification subcommands (`s9pk::verify`,
/// `config::verify_spec`); performs no work itself.
#[command(subcommands(crate::s9pk::verify, crate::config::verify_spec))]
pub fn verify() -> Result<(), Error> {
    Ok(())
}
|
|
||||||
@@ -1,66 +0,0 @@
|
|||||||
use std::path::Path;
|
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use rpc_toolkit::command;
|
|
||||||
use rpc_toolkit::yajrc::RpcError;
|
|
||||||
|
|
||||||
use crate::context::DiagnosticContext;
|
|
||||||
use crate::disk::repair;
|
|
||||||
use crate::logs::{fetch_logs, LogResponse, LogSource};
|
|
||||||
use crate::shutdown::Shutdown;
|
|
||||||
use crate::util::display_none;
|
|
||||||
use crate::Error;
|
|
||||||
|
|
||||||
/// systemd unit whose journal is queried by the diagnostic `logs` command.
// `'static` is implied on const references (clippy: redundant_static_lifetimes).
pub const SYSTEMD_UNIT: &str = "embassy-init";
|
|
||||||
|
|
||||||
/// Parent command grouping the diagnostic-mode subcommands; performs no work
/// itself.
#[command(subcommands(error, logs, exit, restart, forget_disk, disk))]
pub fn diagnostic() -> Result<(), Error> {
    Ok(())
}
|
|
||||||
|
|
||||||
#[command]
|
|
||||||
pub fn error(#[context] ctx: DiagnosticContext) -> Result<Arc<RpcError>, Error> {
|
|
||||||
Ok(ctx.error.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[command(rpc_only)]
|
|
||||||
pub async fn logs(
|
|
||||||
#[arg] limit: Option<usize>,
|
|
||||||
#[arg] cursor: Option<String>,
|
|
||||||
#[arg] before: bool,
|
|
||||||
) -> Result<LogResponse, Error> {
|
|
||||||
Ok(fetch_logs(LogSource::Service(SYSTEMD_UNIT), limit, cursor, before).await?)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Exits diagnostic mode by signalling the shutdown channel.
///
/// Sending `None` requests a plain shutdown (contrast with `restart`, which
/// sends a `Shutdown` value with `restart: true`).
#[command(display(display_none))]
pub fn exit(#[context] ctx: DiagnosticContext) -> Result<(), Error> {
    // Panics only if the shutdown receiver was dropped, which would mean the
    // main loop is already gone.
    ctx.shutdown.send(None).expect("receiver dropped");
    Ok(())
}
|
|
||||||
|
|
||||||
#[command(display(display_none))]
|
|
||||||
pub fn restart(#[context] ctx: DiagnosticContext) -> Result<(), Error> {
|
|
||||||
ctx.shutdown
|
|
||||||
.send(Some(Shutdown {
|
|
||||||
datadir: ctx.datadir.clone(),
|
|
||||||
disk_guid: ctx.disk_guid.clone(),
|
|
||||||
db_handle: None,
|
|
||||||
restart: true,
|
|
||||||
}))
|
|
||||||
.expect("receiver dropped");
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Parent command grouping disk-related diagnostic subcommands; performs no
/// work itself.
#[command(subcommands(forget_disk, repair))]
pub fn disk() -> Result<(), Error> {
    Ok(())
}
|
|
||||||
|
|
||||||
#[command(rename = "forget", display(display_none))]
|
|
||||||
pub async fn forget_disk() -> Result<(), Error> {
|
|
||||||
let disk_guid = Path::new("/embassy-os/disk.guid");
|
|
||||||
if tokio::fs::metadata(disk_guid).await.is_ok() {
|
|
||||||
tokio::fs::remove_file(disk_guid).await?;
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
@@ -1,120 +0,0 @@
|
|||||||
use std::ffi::OsStr;
|
|
||||||
use std::path::Path;
|
|
||||||
|
|
||||||
use color_eyre::eyre::eyre;
|
|
||||||
use futures::future::BoxFuture;
|
|
||||||
use futures::FutureExt;
|
|
||||||
use tokio::process::Command;
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use crate::Error;
|
|
||||||
|
|
||||||
/// Whether the machine must be rebooted (e.g. because `e2fsck` reported exit
/// bit 2, "system should be rebooted").
///
/// `#[must_use]` so callers cannot silently drop the flag.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[must_use]
pub struct RequiresReboot(pub bool);
impl std::ops::BitOrAssign for RequiresReboot {
    /// Accumulates requirements: the result requires a reboot if either side
    /// does.
    fn bitor_assign(&mut self, rhs: Self) {
        self.0 |= rhs.0
    }
}
|
|
||||||
|
|
||||||
/// How aggressively `e2fsck` should repair a filesystem.
#[derive(Debug, Clone, Copy)]
pub enum RepairStrategy {
    // Automatic "preen" mode (`e2fsck -p`): only safe, non-interactive fixes.
    Preen,
    // Answer yes to all repairs (`e2fsck -y`), recording an undo file (`-z`).
    Aggressive,
}
impl RepairStrategy {
    /// Runs `e2fsck` on `logicalname` according to the chosen strategy,
    /// reporting whether a reboot is now required.
    pub async fn e2fsck(
        &self,
        logicalname: impl AsRef<Path> + std::fmt::Debug,
    ) -> Result<RequiresReboot, Error> {
        match self {
            RepairStrategy::Preen => e2fsck_preen(logicalname).await,
            RepairStrategy::Aggressive => e2fsck_aggressive(logicalname).await,
        }
    }
}
|
|
||||||
|
|
||||||
#[instrument]
|
|
||||||
pub async fn e2fsck_preen(
|
|
||||||
logicalname: impl AsRef<Path> + std::fmt::Debug,
|
|
||||||
) -> Result<RequiresReboot, Error> {
|
|
||||||
e2fsck_runner(Command::new("e2fsck").arg("-p"), logicalname).await
|
|
||||||
}
|
|
||||||
|
|
||||||
fn backup_existing_undo_file<'a>(path: &'a Path) -> BoxFuture<'a, Result<(), Error>> {
|
|
||||||
async move {
|
|
||||||
if tokio::fs::metadata(path).await.is_ok() {
|
|
||||||
let bak = path.with_extension(format!(
|
|
||||||
"{}.bak",
|
|
||||||
path.extension()
|
|
||||||
.and_then(|s| s.to_str())
|
|
||||||
.unwrap_or_default()
|
|
||||||
));
|
|
||||||
backup_existing_undo_file(&bak).await?;
|
|
||||||
tokio::fs::rename(path, &bak).await?;
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
.boxed()
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument]
|
|
||||||
pub async fn e2fsck_aggressive(
|
|
||||||
logicalname: impl AsRef<Path> + std::fmt::Debug,
|
|
||||||
) -> Result<RequiresReboot, Error> {
|
|
||||||
let undo_path = Path::new("/embassy-os")
|
|
||||||
.join(
|
|
||||||
logicalname
|
|
||||||
.as_ref()
|
|
||||||
.file_name()
|
|
||||||
.unwrap_or(OsStr::new("unknown")),
|
|
||||||
)
|
|
||||||
.with_extension("e2undo");
|
|
||||||
backup_existing_undo_file(&undo_path).await?;
|
|
||||||
e2fsck_runner(
|
|
||||||
Command::new("e2fsck").arg("-y").arg("-z").arg(undo_path),
|
|
||||||
logicalname,
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Runs the prepared `e2fsck` command against `logicalname` and interprets its
/// exit status.
///
/// `e2fsck` exits with a bitmask: 1 = errors corrected, 2 = system should be
/// rebooted, 4 = errors left uncorrected, >= 8 = operational/usage failure.
/// Codes below 8 are treated as success (bit 2 sets `RequiresReboot(true)`);
/// 8 and above become an `Error`.
async fn e2fsck_runner(
    e2fsck_cmd: &mut Command,
    logicalname: impl AsRef<Path> + std::fmt::Debug,
) -> Result<RequiresReboot, Error> {
    let e2fsck_out = e2fsck_cmd.arg(logicalname.as_ref()).output().await?;
    let e2fsck_stderr = String::from_utf8(e2fsck_out.stderr)?;
    // No exit code means the process was killed by a signal.
    let code = e2fsck_out.status.code().ok_or_else(|| {
        Error::new(
            eyre!("e2fsck: process terminated by signal"),
            crate::ErrorKind::DiskManagement,
        )
    })?;
    // Bit 4: errors remain uncorrected — logged but NOT treated as fatal here.
    if code & 4 != 0 {
        tracing::error!(
            "some filesystem errors NOT corrected on {}:\n{}",
            logicalname.as_ref().display(),
            e2fsck_stderr,
        );
    // Bit 1: errors were found and corrected.
    } else if code & 1 != 0 {
        tracing::warn!(
            "filesystem errors corrected on {}:\n{}",
            logicalname.as_ref().display(),
            e2fsck_stderr,
        );
    }
    if code < 8 {
        // Bit 2: e2fsck requests a reboot before the filesystem is used.
        if code & 2 != 0 {
            tracing::warn!("reboot required");
            Ok(RequiresReboot(true))
        } else {
            Ok(RequiresReboot(false))
        }
    } else {
        // 8+ indicates an operational error (bad invocation, I/O failure, ...).
        Err(Error::new(
            eyre!("e2fsck: {}", e2fsck_stderr),
            crate::ErrorKind::DiskManagement,
        ))
    }
}
|
|
||||||
@@ -1,299 +0,0 @@
|
|||||||
use std::collections::BTreeMap;
|
|
||||||
use std::path::{Path, PathBuf};
|
|
||||||
|
|
||||||
use color_eyre::eyre::eyre;
|
|
||||||
use tokio::process::Command;
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use super::fsck::{RepairStrategy, RequiresReboot};
|
|
||||||
use super::util::pvscan;
|
|
||||||
use crate::disk::mount::filesystem::block_dev::mount;
|
|
||||||
use crate::disk::mount::filesystem::ReadWrite;
|
|
||||||
use crate::disk::mount::util::unmount;
|
|
||||||
use crate::util::Invoke;
|
|
||||||
use crate::{Error, ErrorKind, ResultExt};
|
|
||||||
|
|
||||||
/// Temporary on-disk location of the LUKS passphrase handed to `cryptsetup`
/// via `--key-file`; written before and removed after each cryptsetup call.
// `'static` is implied on const references (clippy: redundant_static_lifetimes).
pub const PASSWORD_PATH: &str = "/etc/embassy/password";
/// Default passphrase used before the user sets one.
pub const DEFAULT_PASSWORD: &str = "password";
|
|
||||||
/// Size of the "main" logical volume; "package-data" receives the remaining
/// free space (see `create_all_fs`).
pub const MAIN_FS_SIZE: FsSize = FsSize::Gigabytes(8);
|
|
||||||
|
|
||||||
/// Creates the full Embassy disk layout on `disks`: a fresh LVM volume group,
/// the encrypted filesystems inside it, and finally exports the group (see
/// `export`) so it can later be re-imported via `import`.
///
/// `pvscan` is the current physical-volume scan result, used to decide which
/// disks need `pvremove` first. Returns the generated `EMBASSY_*` GUID of the
/// new volume group.
#[instrument(skip(disks, datadir, password))]
pub async fn create<I, P>(
    disks: &I,
    pvscan: &BTreeMap<PathBuf, Option<String>>,
    datadir: impl AsRef<Path>,
    password: &str,
) -> Result<String, Error>
where
    for<'a> &'a I: IntoIterator<Item = &'a P>,
    P: AsRef<Path>,
{
    let guid = create_pool(disks, pvscan).await?;
    create_all_fs(&guid, &datadir, password).await?;
    export(&guid, datadir).await?;
    Ok(guid)
}
|
|
||||||
|
|
||||||
/// DESTRUCTIVE: wipes each disk in `disks` and creates a new LVM volume group
/// named `EMBASSY_<random base32>` spanning all of them.
///
/// `pvscan` is the current physical-volume scan; disks already known to LVM
/// are `pvremove`d before being re-initialized. Returns the new group's GUID.
#[instrument(skip(disks))]
pub async fn create_pool<I, P>(
    disks: &I,
    pvscan: &BTreeMap<PathBuf, Option<String>>,
) -> Result<String, Error>
where
    for<'a> &'a I: IntoIterator<Item = &'a P>,
    P: AsRef<Path>,
{
    // Tear down any existing device-mapper state first.
    Command::new("dmsetup")
        .arg("remove_all") // TODO: find a higher finesse way to do this for portability reasons
        .invoke(crate::ErrorKind::DiskManagement)
        .await?;
    for disk in disks {
        // Remove stale LVM physical-volume metadata if this disk has any.
        if pvscan.contains_key(disk.as_ref()) {
            Command::new("pvremove")
                .arg("-yff")
                .arg(disk.as_ref())
                .invoke(crate::ErrorKind::DiskManagement)
                .await?;
        }
        tokio::fs::write(disk.as_ref(), &[0; 2048]).await?; // wipe partition table
        Command::new("pvcreate")
            .arg("-yff")
            .arg(disk.as_ref())
            .invoke(crate::ErrorKind::DiskManagement)
            .await?;
    }
    // 32 random bytes, base32 (RFC 4648, unpadded) — the volume-group name.
    let guid = format!(
        "EMBASSY_{}",
        base32::encode(
            base32::Alphabet::RFC4648 { padding: false },
            &rand::random::<[u8; 32]>(),
        )
    );
    let mut cmd = Command::new("vgcreate");
    cmd.arg("-y").arg(&guid);
    for disk in disks {
        cmd.arg(disk.as_ref());
    }
    cmd.invoke(crate::ErrorKind::DiskManagement).await?;
    Ok(guid)
}
|
|
||||||
|
|
||||||
/// How large to make a logical volume when it is created (see `create_fs`).
#[derive(Debug, Clone, Copy)]
pub enum FsSize {
    // Fixed size in gigabytes (`lvcreate -L <n>G`).
    Gigabytes(usize),
    // Percentage of the volume group's remaining free space
    // (`lvcreate -l <n>%FREE`).
    FreePercentage(usize),
}
|
|
||||||
|
|
||||||
#[instrument(skip(datadir, password))]
|
|
||||||
pub async fn create_fs<P: AsRef<Path>>(
|
|
||||||
guid: &str,
|
|
||||||
datadir: P,
|
|
||||||
name: &str,
|
|
||||||
size: FsSize,
|
|
||||||
password: &str,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
tokio::fs::write(PASSWORD_PATH, password)
|
|
||||||
.await
|
|
||||||
.with_ctx(|_| (crate::ErrorKind::Filesystem, PASSWORD_PATH))?;
|
|
||||||
let mut cmd = Command::new("lvcreate");
|
|
||||||
match size {
|
|
||||||
FsSize::Gigabytes(a) => cmd.arg("-L").arg(format!("{}G", a)),
|
|
||||||
FsSize::FreePercentage(a) => cmd.arg("-l").arg(format!("{}%FREE", a)),
|
|
||||||
};
|
|
||||||
cmd.arg("-y")
|
|
||||||
.arg("-n")
|
|
||||||
.arg(name)
|
|
||||||
.arg(guid)
|
|
||||||
.invoke(crate::ErrorKind::DiskManagement)
|
|
||||||
.await?;
|
|
||||||
Command::new("cryptsetup")
|
|
||||||
.arg("-q")
|
|
||||||
.arg("luksFormat")
|
|
||||||
.arg(format!("--key-file={}", PASSWORD_PATH))
|
|
||||||
.arg(format!("--keyfile-size={}", password.len()))
|
|
||||||
.arg(Path::new("/dev").join(guid).join(name))
|
|
||||||
.invoke(crate::ErrorKind::DiskManagement)
|
|
||||||
.await?;
|
|
||||||
Command::new("cryptsetup")
|
|
||||||
.arg("-q")
|
|
||||||
.arg("luksOpen")
|
|
||||||
.arg(format!("--key-file={}", PASSWORD_PATH))
|
|
||||||
.arg(format!("--keyfile-size={}", password.len()))
|
|
||||||
.arg(Path::new("/dev").join(guid).join(name))
|
|
||||||
.arg(format!("{}_{}", guid, name))
|
|
||||||
.invoke(crate::ErrorKind::DiskManagement)
|
|
||||||
.await?;
|
|
||||||
Command::new("mkfs.ext4")
|
|
||||||
.arg(Path::new("/dev/mapper").join(format!("{}_{}", guid, name)))
|
|
||||||
.invoke(crate::ErrorKind::DiskManagement)
|
|
||||||
.await?;
|
|
||||||
mount(
|
|
||||||
Path::new("/dev/mapper").join(format!("{}_{}", guid, name)),
|
|
||||||
datadir.as_ref().join(name),
|
|
||||||
ReadWrite,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
tokio::fs::remove_file(PASSWORD_PATH)
|
|
||||||
.await
|
|
||||||
.with_ctx(|_| (crate::ErrorKind::Filesystem, PASSWORD_PATH))?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(skip(datadir, password))]
|
|
||||||
pub async fn create_all_fs<P: AsRef<Path>>(
|
|
||||||
guid: &str,
|
|
||||||
datadir: P,
|
|
||||||
password: &str,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
create_fs(guid, &datadir, "main", MAIN_FS_SIZE, password).await?;
|
|
||||||
create_fs(
|
|
||||||
guid,
|
|
||||||
&datadir,
|
|
||||||
"package-data",
|
|
||||||
FsSize::FreePercentage(100),
|
|
||||||
password,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(skip(datadir))]
|
|
||||||
pub async fn unmount_fs<P: AsRef<Path>>(guid: &str, datadir: P, name: &str) -> Result<(), Error> {
|
|
||||||
unmount(datadir.as_ref().join(name)).await?;
|
|
||||||
Command::new("cryptsetup")
|
|
||||||
.arg("-q")
|
|
||||||
.arg("luksClose")
|
|
||||||
.arg(format!("{}_{}", guid, name))
|
|
||||||
.invoke(crate::ErrorKind::DiskManagement)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(skip(datadir))]
|
|
||||||
pub async fn unmount_all_fs<P: AsRef<Path>>(guid: &str, datadir: P) -> Result<(), Error> {
|
|
||||||
unmount_fs(guid, &datadir, "main").await?;
|
|
||||||
unmount_fs(guid, &datadir, "package-data").await?;
|
|
||||||
Command::new("dmsetup")
|
|
||||||
.arg("remove_all") // TODO: find a higher finesse way to do this for portability reasons
|
|
||||||
.invoke(crate::ErrorKind::DiskManagement)
|
|
||||||
.await?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(skip(datadir))]
|
|
||||||
pub async fn export<P: AsRef<Path>>(guid: &str, datadir: P) -> Result<(), Error> {
|
|
||||||
Command::new("sync").invoke(ErrorKind::Filesystem).await?;
|
|
||||||
unmount_all_fs(guid, datadir).await?;
|
|
||||||
Command::new("vgchange")
|
|
||||||
.arg("-an")
|
|
||||||
.arg(guid)
|
|
||||||
.invoke(crate::ErrorKind::DiskManagement)
|
|
||||||
.await?;
|
|
||||||
Command::new("vgexport")
|
|
||||||
.arg(guid)
|
|
||||||
.invoke(crate::ErrorKind::DiskManagement)
|
|
||||||
.await?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(skip(datadir, password))]
|
|
||||||
pub async fn import<P: AsRef<Path>>(
|
|
||||||
guid: &str,
|
|
||||||
datadir: P,
|
|
||||||
repair: RepairStrategy,
|
|
||||||
password: &str,
|
|
||||||
) -> Result<RequiresReboot, Error> {
|
|
||||||
let scan = pvscan().await?;
|
|
||||||
if scan
|
|
||||||
.values()
|
|
||||||
.filter_map(|a| a.as_ref())
|
|
||||||
.filter(|a| a.starts_with("EMBASSY_"))
|
|
||||||
.next()
|
|
||||||
.is_none()
|
|
||||||
{
|
|
||||||
return Err(Error::new(
|
|
||||||
eyre!("Embassy disk not found."),
|
|
||||||
crate::ErrorKind::DiskNotAvailable,
|
|
||||||
));
|
|
||||||
}
|
|
||||||
if !scan
|
|
||||||
.values()
|
|
||||||
.filter_map(|a| a.as_ref())
|
|
||||||
.any(|id| id == guid)
|
|
||||||
{
|
|
||||||
return Err(Error::new(
|
|
||||||
eyre!("An Embassy disk was found, but it is not the correct disk for this device."),
|
|
||||||
crate::ErrorKind::IncorrectDisk,
|
|
||||||
));
|
|
||||||
}
|
|
||||||
Command::new("dmsetup")
|
|
||||||
.arg("remove_all") // TODO: find a higher finesse way to do this for portability reasons
|
|
||||||
.invoke(crate::ErrorKind::DiskManagement)
|
|
||||||
.await?;
|
|
||||||
match Command::new("vgimport")
|
|
||||||
.arg(guid)
|
|
||||||
.invoke(crate::ErrorKind::DiskManagement)
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
Ok(_) => Ok(()),
|
|
||||||
Err(e)
|
|
||||||
if format!("{}", e.source)
|
|
||||||
.lines()
|
|
||||||
.any(|l| l.trim() == format!("Volume group \"{}\" is not exported", guid)) =>
|
|
||||||
{
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
Err(e) => Err(e),
|
|
||||||
}?;
|
|
||||||
Command::new("vgchange")
|
|
||||||
.arg("-ay")
|
|
||||||
.arg(guid)
|
|
||||||
.invoke(crate::ErrorKind::DiskManagement)
|
|
||||||
.await?;
|
|
||||||
mount_all_fs(guid, datadir, repair, password).await
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(skip(datadir, password))]
|
|
||||||
pub async fn mount_fs<P: AsRef<Path>>(
|
|
||||||
guid: &str,
|
|
||||||
datadir: P,
|
|
||||||
name: &str,
|
|
||||||
repair: RepairStrategy,
|
|
||||||
password: &str,
|
|
||||||
) -> Result<RequiresReboot, Error> {
|
|
||||||
tokio::fs::write(PASSWORD_PATH, password)
|
|
||||||
.await
|
|
||||||
.with_ctx(|_| (crate::ErrorKind::Filesystem, PASSWORD_PATH))?;
|
|
||||||
Command::new("cryptsetup")
|
|
||||||
.arg("-q")
|
|
||||||
.arg("luksOpen")
|
|
||||||
.arg(format!("--key-file={}", PASSWORD_PATH))
|
|
||||||
.arg(format!("--keyfile-size={}", password.len()))
|
|
||||||
.arg(Path::new("/dev").join(guid).join(name))
|
|
||||||
.arg(format!("{}_{}", guid, name))
|
|
||||||
.invoke(crate::ErrorKind::DiskManagement)
|
|
||||||
.await?;
|
|
||||||
let mapper_path = Path::new("/dev/mapper").join(format!("{}_{}", guid, name));
|
|
||||||
let reboot = repair.e2fsck(&mapper_path).await?;
|
|
||||||
mount(&mapper_path, datadir.as_ref().join(name), ReadWrite).await?;
|
|
||||||
|
|
||||||
tokio::fs::remove_file(PASSWORD_PATH)
|
|
||||||
.await
|
|
||||||
.with_ctx(|_| (crate::ErrorKind::Filesystem, PASSWORD_PATH))?;
|
|
||||||
|
|
||||||
Ok(reboot)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(skip(datadir, password))]
|
|
||||||
pub async fn mount_all_fs<P: AsRef<Path>>(
|
|
||||||
guid: &str,
|
|
||||||
datadir: P,
|
|
||||||
repair: RepairStrategy,
|
|
||||||
password: &str,
|
|
||||||
) -> Result<RequiresReboot, Error> {
|
|
||||||
let mut reboot = RequiresReboot(false);
|
|
||||||
reboot |= mount_fs(guid, &datadir, "main", repair, password).await?;
|
|
||||||
reboot |= mount_fs(guid, &datadir, "package-data", repair, password).await?;
|
|
||||||
Ok(reboot)
|
|
||||||
}
|
|
||||||