Compare commits

..

817 Commits

Author SHA1 Message Date
ed
d88889d3fc v1.6.14 2023-04-24 06:09:44 +00:00
ed
6f244d4335 update pkgs to 1.6.13 2023-04-24 00:46:47 +00:00
ed
cacca663b3 v1.6.13 2023-04-23 23:05:31 +00:00
ed
d5109be559 ftp: track login state isolated from pyftpdlib;
for convenience, the password can be provided as the username
but that confuses pyftpd a little so let's do this
2023-04-23 21:06:19 +00:00
ed
d999f06bb9 volflags can be -unset 2023-04-23 21:05:29 +00:00
ed
a1a8a8c7b5 configurable tls-certificate location 2023-04-23 20:56:55 +00:00
ed
fdd6f3b4a6 tar/zip: use volume name as toplevel fallback 2023-04-23 20:55:34 +00:00
ed
f5191973df docs cleanup:
* mostly deprecate --http-only and --https-only since there is zero
   performance gain in recent python versions, however could still be
   useful for avoiding limitations in alternative python interpreters
   (and forcing http/https with mdns/ssdp/qr)

* mention antivirus being useless as usual
2023-04-23 20:25:44 +00:00
ed
ddbaebe779 update pkgs to 1.6.12 2023-04-20 22:47:37 +00:00
ed
42099baeff v1.6.12 2023-04-20 21:41:47 +00:00
ed
2459965ca8 u2cli: dont enter delete stage if something failed 2023-04-20 20:40:09 +00:00
ed
6acf436573 u2idx pool instead of per-socket;
prevents running out of FDs thanks to thousands of sqlite3 sessions
and neatly sidesteps what could possibly be a race in python's
sqlite3 bindings where it sometimes forgets to close the fd
2023-04-20 20:36:13 +00:00
ed
f217e1ce71 correctly ignore multirange requests 2023-04-20 19:14:38 +00:00
ed
418000aee3 explain tus incompatibility + update docs 2023-04-19 21:46:33 +00:00
ed
dbbba9625b nix: make deps optional + update docs 2023-04-17 13:17:53 +02:00
Chinpo Nya
397bc92fbc rewrite the nix module config with nix options 2023-04-17 00:26:57 +02:00
Chinpo Nya
6e615dcd03 fix: remove ffmpeg from python env build inputs 2023-04-17 00:26:57 +02:00
Chinpo Nya
9ac5908b33 refactor: remove unnecessary use of 'rec' 2023-04-17 00:26:57 +02:00
Chinpo Nya
50912480b9 automate nix package updates 2023-04-17 00:26:57 +02:00
Chinpo Nya
24b9b8319d nix/nixos documentation 2023-04-17 00:26:57 +02:00
Chinpo Nya
b0f4f0b653 nixos module 2023-04-17 00:26:57 +02:00
Chinpo Nya
05bbd41c4b nix package 2023-04-17 00:26:57 +02:00
ed
8f5f8a3cda expand userhomes everywhere:
* -c
* -lo
* --hist
* hist volflag
* --ssl-log
2023-04-14 18:55:19 +02:00
ed
c8938fc033 fix ipv4 location header on dualstack 2023-04-14 14:06:44 +02:00
ed
1550350e05 update docs (performance tips, windows example) 2023-04-13 21:36:55 +00:00
ed
5cc190c026 better 2023-04-12 22:09:46 +00:00
ed
d6a0a738ce add windows example + update docs + some cosmetics 2023-04-12 22:06:44 +00:00
ed
f5fe3678ee more safari-on-touchbar-macbook workarounds:
* safari invokes pause on the mediasession
   whenever any Audio loads a new src (preload)

* ...and on some(?) seeks
2023-04-07 23:04:01 +02:00
ed
f2a7925387 avoid safari bugs on touchbar macbooks:
* songs would play backwards
* playback started immediately on folder change
2023-04-07 12:38:37 +02:00
ed
fa953ced52 update archpkg to 1.6.11 2023-04-01 22:59:20 +00:00
ed
f0000d9861 v1.6.11 2023-04-01 21:12:54 +00:00
ed
4e67516719 last.fm web-scrobbler support 2023-04-01 21:02:03 +00:00
ed
29db7a6270 deps: automate prismjs build 2023-04-01 17:46:42 +00:00
ed
852499e296 dont panic in case of extension-injected css 2023-04-01 16:08:45 +00:00
ed
f1775fd51c update deps 2023-04-01 15:15:53 +00:00
ed
4bb306932a update systemd notes 2023-04-01 10:32:12 +00:00
ed
2a37e81bd8 add rclone optimization, closes #21 2023-04-01 10:21:21 +00:00
ed
6a312ca856 something dumb 2023-04-01 00:16:30 +00:00
ed
e7f3e475a2 more accurate bpm detector 2023-03-31 21:20:37 +00:00
ed
854ba0ec06 add audio filter plugin thing 2023-03-31 20:20:28 +00:00
ed
209b49d771 remind sqlite we have indexes 2023-03-30 21:45:58 +00:00
ed
949baae539 integrate markdown thumbs with image gallery 2023-03-30 21:21:21 +00:00
ed
5f4ea27586 new hook: exif stripper 2023-03-26 22:19:15 +00:00
ed
099cc97247 hooks: more correct usage examples 2023-03-26 22:18:48 +00:00
ed
592b7d6315 gdi js 2023-03-26 02:06:49 +00:00
ed
0880bf55a1 markdown thumbnails 2023-03-26 01:53:41 +00:00
ed
4cbffec0ec u2cli: show more errors + drop --ws (does nothing) 2023-03-23 23:47:41 +00:00
ed
cc355417d4 update docs 2023-03-23 23:37:45 +00:00
ed
e2bc573e61 webdav correctness:
* generally respond without body
   (rclone likes this)
* don't connection:close on most mkcol errors
2023-03-23 23:25:00 +00:00
ed
41c0376177 update archpkg to 1.6.10 2023-03-20 23:37:20 +00:00
ed
c01cad091e v1.6.10 2023-03-20 21:56:31 +00:00
ed
eb349f339c update foldersync / rclone docs 2023-03-20 21:54:08 +00:00
ed
24d8caaf3e switch rclone to owncloud mode so it sends lastmod 2023-03-20 21:45:52 +00:00
ed
5ac2c20959 basic support for rclone sync 2023-03-20 21:17:53 +00:00
ed
bb72e6bf30 support propfind of files (not just dirs) 2023-03-20 20:58:51 +00:00
ed
d8142e866a accept last-modified from owncloud webdav extension 2023-03-20 20:28:26 +00:00
ed
7b7979fd61 add sftpgo to comparison + update docs 2023-03-19 21:45:35 +00:00
ed
749616d09d help iOS understand short audio files 2023-03-19 20:03:35 +00:00
ed
5485c6d7ca prisonparty: FFmpeg runs faster with /dev/urandom 2023-03-19 18:32:35 +00:00
ed
b7aea38d77 add iOS uploader (mk.ii) 2023-03-18 18:38:37 +00:00
ed
0ecd9f99e6 update archpkg to 1.6.9 2023-03-16 22:34:09 +00:00
ed
ca04a00662 v1.6.9 2023-03-16 21:06:18 +00:00
ed
8a09601be8 url-param ?v disables index.html 2023-03-16 20:52:43 +00:00
ed
1fe0d4693e fix logues bleeding into navpane 2023-03-16 20:23:01 +00:00
ed
bba8a3c6bc fix truncated search results 2023-03-16 20:12:13 +00:00
ed
e3d7f0c7d5 add tooltip delay to android too 2023-03-16 19:48:44 +00:00
ed
be7bb71bbc add option to show index.html instead of listing 2023-03-16 19:41:33 +00:00
ed
e0c4829ec6 verify covers against db instead of fs 2023-03-15 19:48:43 +00:00
ed
5af1575329 readme: ideas welcome w 2023-03-14 22:24:43 +00:00
ed
884f966b86 update archpkg to 1.6.8 2023-03-12 18:55:02 +00:00
ed
f6c6fbc223 fix exe builder 2023-03-12 18:54:16 +00:00
ed
b0cc396bca v1.6.8 2023-03-12 16:10:07 +00:00
ed
ae463518f6 u2cli: send upload stats to server + fix py2.6 support 2023-03-11 21:39:56 +00:00
ed
2be2e9a0d8 index folder thumbs in db 2023-03-11 11:43:29 +00:00
ed
e405fddf74 specify that only up2k clients will resume uploads 2023-03-09 22:47:37 +00:00
ed
c269b0dd91 show an error (instead of crashing) if a pic is 404 2023-03-09 22:37:12 +00:00
ed
8c3211263a keep scanning folders for more music to play 2023-03-09 22:26:41 +00:00
ed
bf04e7c089 update some docs 2023-03-09 22:11:39 +00:00
ed
c7c6e48b1a didn't compress numbered logfiles 2023-03-09 21:59:59 +00:00
ed
974ca773be just to be extra sure 2023-03-09 21:49:29 +00:00
ed
9270c2df19 evict basic-browser from crawlers 2023-03-09 21:35:07 +00:00
ed
b39ff92f34 u2cli: support long paths on win7 2023-03-08 22:27:13 +00:00
ed
7454167f78 add DCO PR template 2023-03-08 08:27:17 +01:00
ed
5ceb3a962f build up2k.exe 2023-03-07 22:58:14 +00:00
ed
52bd5642da update archpkg to 1.6.7 2023-03-05 20:20:15 +00:00
ed
c39c93725f v1.6.7 2023-03-05 20:18:16 +00:00
ed
d00f0b9fa7 ftp: support filezilla mkdir 2023-03-05 20:18:02 +00:00
ed
01cfc70982 add example for webdav automount 2023-03-05 19:52:45 +00:00
ed
e6aec189bd fix flickering toast on upload finish 2023-03-05 19:49:54 +00:00
ed
c98fff1647 fix chunkpost-handshake race (affects --no-dedup only);
a handshake arriving in the middle of the final chunk could cause
dupes to become empty -- worst case leading to loss of data
2023-03-05 19:45:50 +00:00
ed
0009e31bd3 heavy webworker load can park the main thread of a
background chrome tab for 10sec; piggyback some pokes off postmessage
2023-03-02 22:35:32 +00:00
ed
db95e880b2 thats not how it works 2023-02-28 22:19:06 +00:00
ed
e69fea4a59 exe: update screenshots 2023-02-26 22:26:40 +00:00
ed
4360800a6e update archpkg to 1.6.6 2023-02-26 22:11:56 +00:00
ed
b179e2b031 prisonparty: ignore unresolvable mount paths;
allows startup even if some locations are missing,
for example if a server rebooted and some disks aren't up yet
2023-02-26 22:11:15 +00:00
ed
ecdec75b4e v1.6.6 2023-02-26 20:30:17 +00:00
ed
5cb2e33353 update readmes + fix typo 2023-02-26 19:22:54 +00:00
ed
43ff2e531a add deadline for filling data into a reserved filename 2023-02-26 19:13:35 +00:00
ed
1c2c9db8f0 retain upload time (but not ip) on file reindex 2023-02-26 19:09:24 +00:00
ed
7ea183baef let http thread handle upload verification plugins 2023-02-26 19:07:49 +00:00
ed
ab87fac6d8 db got the wrong lastmod when linking dupes 2023-02-26 18:52:04 +00:00
ed
1e3b7eee3b dont rmdir volume top on cleanup 2023-02-26 18:28:37 +00:00
ed
4de028fc3b let controlpanel rescan button override lack of e2dsa 2023-02-26 18:27:10 +00:00
ed
604e5dfaaf improve error handling / messages 2023-02-26 18:26:13 +00:00
ed
05e0c2ec9e add xiu (batching hook; runs on idle after uploads) +
bunch of tweaks/fixes for hooks
2023-02-26 18:23:32 +00:00
ed
76bd005bdc cgen fixes 2023-02-21 19:42:08 +00:00
ed
5effaed352 add reminder that SSDP launches IE by default 2023-02-21 19:38:35 +00:00
ed
cedaf4809f add exe integrity selfcheck 2023-02-21 19:18:10 +00:00
ed
6deaf5c268 add jitter simlation 2023-02-20 21:34:30 +00:00
ed
9dc6a26472 webdav.bat and readme tweaks 2023-02-20 21:00:04 +00:00
ed
14ad5916fc freebsd: fancy console listing for fetch 2023-02-19 22:14:21 +00:00
ed
1a46738649 raise edgecases (broken envs on windows) 2023-02-19 22:13:33 +00:00
ed
9e5e3b099a add optional deps to quickstart section 2023-02-19 22:13:02 +00:00
ed
292ce75cc2 return to previous url after login 2023-02-19 19:58:15 +00:00
ed
ce7df7afd4 update platform support listing 2023-02-19 15:16:50 +00:00
ed
e28e793f81 whoops 2023-02-19 15:11:04 +00:00
ed
3e561976db optimize docker build times (884 to 379 sec) 2023-02-19 14:19:35 +00:00
ed
273a4eb7d0 list supported platforms 2023-02-19 01:00:37 +00:00
ed
6175f85bb6 more docker images for arm, arm64, s390x 2023-02-19 00:50:07 +00:00
ed
a80579f63a build docker for x32 aarch64 armhf ppc64 s390x 2023-02-18 23:04:55 +00:00
ed
96d6bcf26e if non-TLS, show warning in the login form 2023-02-17 22:49:03 +00:00
ed
49e8df25ac ie11: support back button 2023-02-17 22:21:13 +00:00
ed
6a05850f21 also undupe search hits from overlapping volumes 2023-02-17 20:48:57 +00:00
ed
5e7c3defe3 update pypi description + docker links 2023-02-16 19:56:57 +00:00
ed
6c0987d4d0 mention --daw 2023-02-15 17:51:20 +00:00
ed
6eba9feffe condense uploads listing on view change 2023-02-14 21:58:15 +00:00
ed
8adfcf5950 win10-based copyparty64.exe 2023-02-14 21:50:14 +00:00
ed
36d6fa512a mention upcoming libopenmpt availability 2023-02-13 06:57:47 +00:00
ed
79b6e9b393 update archpkg to 1.6.5 2023-02-12 15:38:03 +00:00
ed
dc2e2cbd4b v1.6.5 2023-02-12 14:11:45 +00:00
ed
5c12dac30f most ffmpeg builds dont support compressed modules 2023-02-12 14:02:43 +00:00
ed
641929191e fix reading smb shares on windows 2023-02-12 13:59:34 +00:00
ed
617321631a docker: add annotations 2023-02-11 21:10:28 +00:00
ed
ddc0c899f8 update archpkg to 1.6.4 2023-02-11 21:01:45 +00:00
ed
cdec42c1ae v1.6.4 2023-02-11 18:02:05 +00:00
ed
c48f469e39 park all clients waiting for a transcode 2023-02-11 17:23:29 +00:00
ed
44909cc7b8 print ffmpeg download url on windows 2023-02-11 17:22:24 +00:00
ed
8f61e1568c transcode chiptunes to opus;
* new audio/MPT formats: apac bonk dfpwm ilbc it itgz itr itz mo3 mod mptm mt2 okt s3gz s3m s3r s3z xm xmgz xmr xmz xpk
* new image/PIL formats: blp dcx emf eps fits flc fli fpx im j2k j2p psd spi wmf
2023-02-11 11:17:37 +00:00
ed
b7be7a0fd8 mirror docker images to ghcr 2023-02-10 23:40:30 +00:00
ed
1526a4e084 add docker packaging 2023-02-10 23:02:01 +00:00
ed
dbdb9574b1 doc-browser: fix md scaling + download hotkey 2023-02-10 21:33:48 +00:00
ed
853ae6386c config load summary + safer windows defaults 2023-02-10 21:32:42 +00:00
ed
a4b56c74c7 support long filepaths on win7 + misc windows fixes 2023-02-10 18:37:37 +00:00
ed
d7f1951e44 fix --cgen for 'g' perms 2023-02-08 22:38:21 +00:00
ed
7e2ff9825e ensure -e2tsr takes effect by ignoring dhash 2023-02-08 22:33:02 +00:00
ed
9b423396ec better description for anonymous permissions 2023-02-07 20:12:45 +00:00
ed
781146b2fb describe all database volflags in --help-flags 2023-02-07 20:07:06 +00:00
ed
84937d1ce0 add v2 config syntax (#20) 2023-02-07 19:54:08 +00:00
ed
98cce66aa4 cgen: update set of multivalue keys 2023-02-06 07:26:23 +00:00
ed
043c2d4858 cgen: fix permissions listing 2023-02-06 07:23:35 +00:00
ed
99cc434779 add config explainer + generator (#20) 2023-02-05 22:09:17 +00:00
ed
5095d17e81 more interesting config example 2023-02-05 21:32:20 +00:00
ed
87d835ae37 dont allow multiple volumes at the same fs-path 2023-02-05 21:16:36 +00:00
ed
6939ca768b pkg/arch: add prisonparty 2023-02-05 00:07:04 +00:00
ed
e3957e8239 systemd: prisonparty improvements 2023-02-05 00:03:40 +00:00
ed
4ad6e45216 only load *.conf files when including a folder 2023-02-05 00:01:10 +00:00
ed
76e5eeea3f prisonparty: fix reload signal 2023-02-05 00:00:18 +00:00
ed
eb17f57761 pypi fixes 2023-02-04 17:35:20 +00:00
ed
b0db14d8b0 indicate forced-randomized filenames 2023-02-04 15:18:09 +00:00
ed
2b644fa81b don't alias randomized filenames 2023-02-04 13:41:43 +00:00
ed
190ccee820 add optional version number on controlpanel 2023-02-04 13:41:34 +00:00
JeremyStarTM
4e7dd32e78 Added "wow this is better than nextcloud" (#19)
* Added "wow this is better than nextcloud"
2023-02-04 13:00:16 +00:00
john smith
5817fb66ae goddamn tabs 2023-02-03 12:50:17 +01:00
john smith
9cb04eef93 misc PKGBUILD fixes 2023-02-03 12:50:17 +01:00
john smith
0019fe7f04 indent PKGBUILD with spaces instead of tabs 2023-02-03 12:50:17 +01:00
john smith
852c6f2de1 remove unnecessary dependencies from PKGBUILD 2023-02-03 12:50:17 +01:00
john smith
c4191de2e7 improve PKGBUILD based on stuff in https://github.com/9001/copyparty/issues/17 2023-02-03 12:50:17 +01:00
ed
4de61defc9 add a link exporter to the unpost ui too 2023-02-02 22:57:59 +00:00
ed
0aa88590d0 should generalize this somehow 2023-02-02 22:35:13 +00:00
ed
405f3ee5fe adjustable toast position 2023-02-02 22:28:31 +00:00
ed
bc339f774a button to show/copy links for all recent uploads 2023-02-02 22:27:53 +00:00
ed
e67b695b23 show filekeys in recent-uploads ui 2023-02-02 21:22:51 +00:00
ed
4a7633ab99 fix outdated docs mentioned in #17 sry 2023-02-02 20:12:32 +00:00
john smith
c58f2ef61f fix PKGBUILD more 2023-02-02 20:48:20 +01:00
john smith
3866e6a3f2 fix PKGBUILD indentation 2023-02-02 20:30:48 +01:00
john smith
381686fc66 add PKGBUILD 2023-02-02 20:30:48 +01:00
ed
a918c285bf up2k-ui: button to randomize upload filenames 2023-02-01 22:26:18 +00:00
ed
1e20eafbe0 volflag to randomize all upload filenames 2023-02-01 21:58:01 +00:00
ed
39399934ee v1.6.3 2023-01-31 21:03:43 +00:00
ed
b47635150a shove #files aside while prologue sandbox is loading 2023-01-31 21:02:58 +00:00
ed
78d2f69ed5 prisonparty: support opus transcoding on debian
libblas.so and liblapack.so are symlinks into /etc/alternatives
2023-01-31 20:50:59 +00:00
ed
7a98dc669e block alerts in sandbox by default + add translation 2023-01-31 19:16:28 +00:00
ed
2f15bb5085 include filesize in notification 2023-01-31 19:03:13 +00:00
ed
712a578e6c indicate when a readme/logue was hidden 2023-01-31 19:01:24 +00:00
ed
d8dfc4ccb2 support davfs2 LOCK (uploads) + misc windows support + logue filtering 2023-01-31 18:53:38 +00:00
ed
e413007eb0 hide dotfiles from search results by default 2023-01-31 18:13:33 +00:00
ed
6d1d3e48d8 sandbox height didnt account for scrollbars 2023-01-31 17:54:04 +00:00
ed
04966164ce more iframe-resize-concealing tricks 2023-01-31 17:43:21 +00:00
ed
8b62aa7cc7 unlink files before replacing them
to avoid hardlink-related surprises
2023-01-31 17:17:18 +00:00
ed
1088e8c6a5 optimize 2023-01-30 22:53:27 +00:00
ed
8c54c2226f cover up most of the layout jank 2023-01-30 22:52:16 +00:00
ed
f74ac1f18b fix sandbox lag by helping the iframe cache js 2023-01-30 22:36:05 +00:00
ed
25931e62fd and nofollow the basic-browser link too 2023-01-29 22:15:22 +00:00
ed
707a940399 add nofollow to zip links 2023-01-29 22:10:03 +00:00
ed
87ef50d384 doc 2023-01-29 21:23:48 +00:00
ed
dcadf2b11c v1.6.2 2023-01-29 18:42:21 +00:00
ed
37a690a4c3 fix cookie + rproxy oversights 2023-01-29 18:34:48 +00:00
ed
87ad23fb93 docs + chmod 2023-01-29 18:28:53 +00:00
ed
5f54d534e3 hook/notify: add android support 2023-01-29 15:14:22 +00:00
ed
aecae552a4 v1.6.1 2023-01-29 04:41:16 +00:00
ed
eaa6b3d0be mute some startup noise 2023-01-29 04:33:28 +00:00
ed
c2ace91e52 v1.6.0 2023-01-29 02:55:44 +00:00
ed
0bac87c36f make loss of hotkeys more obvious 2023-01-29 01:40:02 +00:00
ed
e650d05939 shovel across most of the env too 2023-01-29 01:19:53 +00:00
ed
85a96e4446 add custom text selection colors because chrome is broken on fedora 2023-01-29 01:03:10 +00:00
ed
2569005139 support sandboxed markdown plugins 2023-01-29 00:57:08 +00:00
ed
c50cb66aef sandboxed other-origin iframes dont cache css 2023-01-28 23:40:25 +00:00
ed
d4c5fca15b sandbox readme.md / prologue / epilogue 2023-01-28 21:24:40 +00:00
ed
75cea4f684 misc 2023-01-28 13:35:49 +00:00
ed
68c6794d33 rewrite other symlinks after the actual move;
fixes volumes where symlinking is disabled
2023-01-28 01:14:29 +00:00
ed
82f98dd54d delete/move is now POST 2023-01-28 01:02:50 +00:00
ed
741d781c18 add cors controls + improve preflight + pw header 2023-01-28 00:59:04 +00:00
ed
0be1e43451 mention mtp in the hooks readme 2023-01-28 00:07:50 +00:00
ed
5366bf22bb describe detected network changes 2023-01-27 23:56:54 +00:00
ed
bcd91b1809 add eventhook examples 2023-01-27 23:55:57 +00:00
ed
9bd5738e6f shorter fallback hostname 2023-01-27 22:19:25 +00:00
ed
bab4aa4c0a mkdir fix 2023-01-27 22:16:10 +00:00
ed
e965b9b9e2 mkdir missing volumes on startup 2023-01-27 21:52:28 +00:00
ed
31101427d3 support downloading blockdev contents 2023-01-27 21:09:57 +00:00
ed
a083dc36ba dont get confused by dangling symlinks at target 2023-01-27 20:27:00 +00:00
ed
9b7b9262aa promote dedup control to volflags 2023-01-25 21:46:15 +00:00
ed
660011fa6e md-editor: make hotkey ^e more global 2023-01-25 20:58:28 +00:00
ed
ead31b6823 add eventhook sanchecks 2023-01-25 20:51:02 +00:00
ed
4310580cd4 separate http/https logins (breaks ie4 / win3.11 login) 2023-01-24 21:23:57 +00:00
ed
b005acbfda enable text selection between breadcrumbs + update vs 2023-01-23 22:44:29 +00:00
ed
460709e6f3 upgrade wget downloader to use event hooks 2023-01-22 23:45:11 +00:00
ed
a8768d05a9 add comparison to similar software 2023-01-22 23:39:19 +00:00
ed
f8e3e87a52 add event hooks 2023-01-22 23:35:31 +00:00
ed
70f1642d0d allow tar/zip download of hidden folders 2023-01-21 20:56:44 +00:00
ed
3fc7561da4 macos 2023-01-21 10:36:31 +00:00
ed
9065226c3d oh great its in lts too 2023-01-21 10:19:04 +00:00
ed
b7e321fa47 cleanup 2023-01-19 22:26:49 +00:00
ed
664665b86b fix some location-rproxy bugs 2023-01-19 22:26:24 +00:00
ed
f4f362b7a4 add --freebind 2023-01-18 21:55:36 +00:00
ed
577d23f460 zeroconf: detect network change and reannounce 2023-01-18 21:27:27 +00:00
ed
504e168486 compensate avg.speed for single-chunk uploads 2023-01-18 19:53:19 +00:00
ed
f2f9640371 workaround firefox layout bug:
three-line toasts get a scrollbar even if it doesn't need one
and the width is not adjusted correctly when that happens
2023-01-18 19:45:04 +00:00
ed
ee46f832b1 u2cli: add option -ns for slow terminals 2023-01-17 23:29:51 +00:00
ed
b0e755d410 give curl colored (yet sortable) plaintext listings 2023-01-17 23:22:43 +00:00
ed
cfd24604d5 ux tweaks 2023-01-17 23:21:31 +00:00
ed
264894e595 add cursed usecases 2023-01-16 21:46:11 +00:00
ed
5bb9f56247 linux 6.1 fixed the 6.0 bugs; remove workarounds 2023-01-16 20:44:57 +00:00
ed
18942ed066 location-based rproxy fixes 2023-01-16 20:09:45 +00:00
ed
85321a6f31 stale tree is better than no tree 2023-01-15 20:54:03 +00:00
ed
baf641396d add optional powered-by footnode 2023-01-15 20:52:38 +00:00
ed
17c91e7014 override bogus mimetypes 2023-01-14 15:10:32 +00:00
ed
010770684d workaround another linux kernel bug 2023-01-14 08:16:15 +00:00
ed
b4c503657b ignore loss of stdout 2023-01-14 07:35:44 +00:00
ed
71bd306268 fix unpost filters with slashes 2023-01-13 17:56:32 +00:00
ed
dd7fab1352 u2cli: properly retry failed handshakes 2023-01-13 07:17:41 +00:00
ed
dacca18863 v1.5.6 2023-01-12 05:15:30 +00:00
ed
53d92cc0a6 faster upload of small files on high-latency nets 2023-01-12 02:53:22 +00:00
ed
434823f6f0 ui: allow changing num.threads in search-only 2023-01-11 16:14:02 +00:00
ed
2cb1f50370 fix dualstack on lo 2023-01-11 16:10:07 +00:00
ed
03f53f6392 gallery: fix js error on digit-keypress viewing pics 2023-01-11 16:08:15 +00:00
ed
a70ecd7af0 v1.5.5 2022-12-30 07:54:34 +00:00
ed
8b81e58205 mdns fixes 2022-12-30 07:47:53 +00:00
ed
4500c04edf v1.5.4 2022-12-29 04:44:15 +00:00
ed
6222ddd720 fix ssdp on dualstack 2022-12-22 16:50:46 +00:00
ed
8a7135cf41 support fat32 time precision, avoiding rescans
posted from warzaw airport otw to japan
2022-12-20 22:19:32 +01:00
ed
b4c7282956 password from file 2022-12-20 13:28:48 +00:00
ed
8491a40a04 Create SECURITY.md 2022-12-19 21:18:27 +00:00
ed
343d38b693 extend image-viewer with modern formats 2022-12-15 22:38:33 +00:00
ed
6cf53d7364 try next thumbnailer if one fails;
libvips assumes imagemagick was built with avif
2022-12-15 22:34:51 +00:00
ed
b070d44de7 libvips logging + raise codec errors 2022-12-15 22:22:04 +00:00
ed
79aa40fdea cosmetic fixes 2022-12-14 23:12:51 +00:00
ed
dcaff2785f v1.5.3 2022-12-13 19:56:34 +00:00
ed
497f5b4307 add hotkey to enable download mode 2022-12-13 19:50:20 +00:00
ed
be32ad0da6 add sfx tester 2022-12-13 19:05:10 +00:00
ed
8ee2bf810b stop battleplan from indirectly crashing the browser 2022-12-13 18:58:16 +00:00
ed
28232656a9 folder-sync optimizations 2022-12-13 18:56:40 +00:00
ed
fbc2424e8f v1.5.2 2022-12-12 22:59:31 +00:00
ed
94cd13e8b8 reorder help categories 2022-12-12 22:18:17 +00:00
ed
447ed5ab37 windows fixes 2022-12-12 21:59:50 +00:00
ed
af59808611 u2cli: always compare toplevel in syncs 2022-12-12 07:16:05 +01:00
ed
e3406a9f86 dont cls by default 2022-12-11 22:46:21 +00:00
ed
7fd1d6a4e8 rename --webroot to --rp-loc and fix related bugs 2022-12-11 21:09:50 +00:00
ed
0ab2a665de add example apache config + readme notes 2022-12-11 21:01:38 +00:00
ed
3895575bc2 add sliding window for upload eta 2022-12-11 19:46:39 +00:00
ed
138c2bbcbb o no 2022-12-11 18:30:29 +00:00
ed
bc7af1d1c8 u2cli: add basic folder sync 2022-12-11 17:41:10 +00:00
ed
19cd96e392 cleanup + optimizations 2022-12-11 14:16:51 +00:00
ed
db194ab519 support location-based rproxy 2022-12-10 23:43:31 +00:00
ed
02ad4bfab2 ensure consistency between db tables 2022-12-10 22:13:21 +00:00
ed
56b73dcc8a up2k: add option to replace existing file 2022-12-10 19:22:16 +00:00
ed
7704b9c8a2 sqlite durability profiles 2022-12-10 10:01:33 +00:00
ed
999b7ae919 safer to merge wal on startup instead 2022-12-09 19:58:13 +00:00
ed
252b5a88b1 use linklocal on NICs without routable IPs 2022-12-09 19:11:26 +00:00
ed
01e2681a07 davfs2 requires realm 2022-12-09 17:59:24 +00:00
ed
aa32f30202 zeroconf: dont cache until resolved 2022-12-08 18:05:45 +00:00
ed
195eb53995 merge wal on shutdown 2022-12-07 23:09:40 +00:00
ed
06fa78f54a windows: set .hist folder hidden 2022-12-07 22:56:30 +00:00
ed
7a57c9dbf1 translation 2022-12-07 22:47:33 +00:00
ed
bb657bfa85 more intuitive batch-unpost ordering 2022-12-07 22:30:48 +00:00
ed
87181726b0 sfx: fix multiprocessing on windows 2022-12-07 22:21:28 +00:00
ed
f1477a1c14 block other copyparties from sniping tcp ports 2022-12-07 21:50:52 +00:00
ed
4f94a9e38b exe: survive ascii locales 2022-12-07 21:35:53 +00:00
ed
fbed322d3b option to skip database syncs entirely 2022-12-07 21:35:04 +00:00
ed
9b0f519e4e switch to wal for ~2x faster uploads 2022-12-07 20:52:17 +00:00
ed
6cd6dadd06 optional linklocal ipv6 support (firefox/ie11 only) 2022-12-05 20:45:21 +00:00
ed
9a28afcb48 custom mediaplayer-toggle cursor 2022-12-05 19:46:48 +00:00
ed
45b701801d fix ssdp xml escaping + target url 2022-12-05 19:13:47 +00:00
ed
062246fb12 allow specifying zeroconf filters by subnet 2022-12-05 17:56:39 +00:00
ed
416ebfdd68 right, windows nic names have whitespace 2022-12-05 17:35:12 +00:00
ed
731eb92f33 fix exception opening the connect page on phones 2022-12-04 17:18:14 +00:00
ed
dbe2aec79c v1.5.1 2022-12-03 20:48:52 +00:00
ed
cd9cafe3a1 v1.5.0 2022-12-03 20:45:49 +00:00
ed
067cc23346 docs + cleanup 2022-12-03 18:58:56 +00:00
ed
c573a780e9 some failsafes 2022-12-03 16:37:14 +00:00
ed
8ef4a0aa71 fix testrunner + packaging 2022-12-03 15:07:47 +00:00
ed
89ba12065c ssdp: add ie8 compat 2022-12-03 13:59:46 +00:00
ed
99efc290df fix mdns on windows 2022-12-03 13:31:00 +00:00
ed
2fbdc0a85e misc fixes / cleanup 2022-12-02 23:42:46 +00:00
ed
4242422898 update deps: marked.js, codemirror 2022-12-02 21:39:04 +00:00
ed
008d9b1834 add textbox placeholders 2022-12-02 18:33:04 +00:00
ed
7c76d08958 drop one of the slowloris detectors 2022-12-02 17:53:23 +00:00
ed
89c9f45fd0 add option for cross-volume dedupe 2022-12-02 17:25:37 +00:00
ed
f107497a94 a bit better 2022-12-01 22:18:17 +00:00
ed
b5dcf30e53 w/a firefox sometimes loading stale documents
never been able to reproduce it intentionally but this should work
2022-12-01 21:52:40 +00:00
ed
0cef062084 misc cleanup 2022-12-01 21:44:31 +00:00
ed
5c30148be4 also scroll to playing track when resizing window 2022-11-29 22:16:14 +00:00
ed
3a800585bc u2cli: server is allowed to reject dupes 2022-11-29 22:09:32 +00:00
ed
29c212a60e macos bigsur breaks on symlinks in ftp listings 2022-11-28 22:10:05 +00:00
ed
2997baa7cb better recovery from i/o errors 2022-11-28 22:06:31 +00:00
ed
dc6bde594d fix make-sfx macos support 2022-11-28 21:38:50 +00:00
ed
e357aa546c add browserchrome color hint 2022-11-28 21:19:42 +00:00
ed
d3fe19c5aa misc fixes 2022-11-28 20:25:32 +00:00
ed
bd24bf9bae option to follow playing song 2022-11-28 20:24:47 +00:00
ed
ee141544aa option for compact mediaplayer 2022-11-28 20:10:10 +00:00
ed
db6f6e6a23 option to hide scrollbars 2022-11-28 19:47:14 +00:00
ed
c7d950dd5e ux tweaks + devdocs 2022-11-27 22:07:28 +00:00
ed
6a96c62fde ok windows is just gonna have to make do 2022-11-27 22:05:38 +00:00
ed
36dc8cd686 readme + misc 2022-11-27 01:30:18 +00:00
ed
7622601a77 forgot to actually enable the new landing page 2022-11-27 00:01:28 +00:00
ed
cfd41fcf41 zeroconf: add network filtering options 2022-11-26 22:37:12 +00:00
ed
f39e370e2a cosmetic 2022-11-26 22:27:09 +00:00
ed
c1315a3b39 webdav: misc fixes 2022-11-26 20:06:48 +00:00
ed
53b32f97e8 ftp: support touch+write, windows-login, verbosity 2022-11-26 20:03:17 +00:00
ed
6c962ec7d3 rename copyparty-fuse to partyfuse 2022-11-26 20:01:20 +00:00
ed
6bc1bc542f rename copyparty-fuse to partyfuse 2022-11-26 19:53:41 +00:00
ed
f0e78a6826 add landing page with mounting instructions 2022-11-26 19:47:27 +00:00
ed
e53531a9fb ssdp: get rid of ipv6 + fix http port selection 2022-11-23 22:44:17 +00:00
ed
5cd9d11329 add ssdp responder 2022-11-22 21:40:12 +00:00
ed
5a3e504ec4 uninvent a square wheel 2022-11-22 19:12:41 +00:00
ed
d6e09c3880 ux: dedicated column-hiding mode on phones 2022-11-21 20:44:58 +00:00
ed
04f44c3c7c add global option for rejecting dupe uploads 2022-11-21 10:58:15 +00:00
ed
ec587423e8 show/hide tagsearch ui based on folder flags 2022-11-20 23:30:01 +00:00
ed
f57b31146d improve parent-folder button on phones 2022-11-20 22:37:55 +00:00
ed
35175fd685 mdns: support primitive clients (android, rfc-6.7) 2022-11-20 20:31:11 +00:00
ed
d326ba9723 ftp: ban password-bruteforcing IPs 2022-11-20 11:06:07 +00:00
ed
ab655a56af add buttons for prev/next folder 2022-11-19 22:19:38 +00:00
ed
d1eb113ea8 add button+hotkey to download all selected files 2022-11-19 21:57:25 +00:00
ed
74effa9b8d audioplayer: time at mousecursor while scrubbing 2022-11-19 20:00:50 +00:00
ed
bba4b1c663 sfx: py3.12 support 2022-11-19 10:47:54 +00:00
ed
8709d4dba0 macos smb: avoid hang on shutdown 2022-11-17 21:17:54 +00:00
ed
4ad4657774 mdns: support running on macos 2022-11-17 20:18:24 +00:00
ed
5abe0c955c this spec is confusing 2022-11-17 09:08:58 +00:00
ed
0cedaf4fa9 isort 2022-11-15 22:41:35 +00:00
ed
0aa7d12704 add option to disable .hist/up2k.snap 2022-11-15 22:16:53 +00:00
ed
a234aa1f7e cleaner shutdown of smbd, mdns 2022-11-15 21:55:02 +00:00
ed
9f68287846 workaround impacket glob bug 2022-11-15 21:29:02 +00:00
ed
cd2513ec16 logging fixes 2022-11-15 21:28:27 +00:00
ed
91d132c2b4 add basic-ui hint for firefox 8 and older 2022-11-15 20:17:53 +00:00
ed
97ff0ebd06 xz-compress logs only if -lo ends with .xz 2022-11-15 20:16:41 +00:00
ed
8829f56d4c mdns ipv6 fixes; now works on ie11/safari, not linux:
* subscribe/announce on LL only
* add NSEC records if 4/6-only
2022-11-15 06:39:53 +00:00
ed
37c1cab726 dnslib tweaks for mdns / py3 2022-11-13 20:06:39 +00:00
ed
b3eb117e87 add mdns zeroconf announcer 2022-11-13 20:05:16 +00:00
ed
fc0a941508 support old linux consoles 2022-11-06 16:58:00 +00:00
ed
c72753c5da add native ipv6 support 2022-11-06 16:48:05 +00:00
ed
e442cb677a improve ftp/smb logging 2022-11-06 13:30:16 +00:00
ed
450121eac9 ftpd: kde tries to cwd into images 2022-11-05 13:24:00 +00:00
ed
b2ab8f971e add config-file preprocessor (%include) 2022-11-04 23:48:14 +00:00
ed
e9c6268568 add more sfx opt-outs 2022-11-04 20:50:52 +00:00
ed
2170ee8da4 improve scheduling 2022-11-04 20:28:05 +00:00
ed
357e7333cc cleanup 2022-11-04 20:27:16 +00:00
ed
8bb4f02601 add textlabel on volume slider 2022-11-04 20:04:39 +00:00
ed
4213efc7a6 optimize more 2022-11-04 19:33:48 +00:00
ed
67a744c3e8 audioplayer: optimize ui for week-long audio files 2022-11-03 23:20:58 +00:00
ed
98818e7d63 smb: workaround impacket response size limit 2022-11-03 23:17:24 +00:00
ed
8650ce1295 smb: too many clients get confused by blank password 2022-11-03 23:08:04 +00:00
ed
9638267b4c up2k-ui: survive hitting inaccessible subfolders 2022-11-02 22:02:46 +00:00
ed
304e053155 improve default-gateway / external-IP detection 2022-11-02 21:43:20 +00:00
ed
89d1f52235 cursory slowloris / buggy-webdav-client detector 2022-11-01 22:18:20 +00:00
ed
3312c6f5bd autoclose connection-flooding clients 2022-10-31 22:42:47 +00:00
ed
d4ba644d07 autodefault -nc based on OS limits 2022-10-31 19:37:37 +00:00
ed
b9a504fd3a x32/x64-agnostic exe builder 2022-10-30 18:35:27 +00:00
ed
cebac523dc fix url anchors into markdown docs 2022-10-30 18:03:40 +00:00
ed
c2f4090318 webdav: mute some macos spam 2022-10-30 17:45:28 +00:00
ed
d562956809 webdav: windows configurator util 2022-10-30 17:41:33 +00:00
ed
62499f9b71 webdav: more sensible overwrite logic 2022-10-30 17:13:06 +00:00
ed
89cf7608f9 webdav: help windows deal with read-only volumes 2022-10-30 17:11:43 +00:00
ed
dd26b8f183 webdav: bump chunksize from 2048 to 32760 byte 2022-10-30 16:53:15 +00:00
ed
79303dac6d webdav: default-disable recursive listing 2022-10-30 16:47:20 +00:00
ed
4203fc161b misc 2022-10-30 16:31:04 +00:00
ed
f8a31cc24f chrome can play some mkv files 2022-10-30 16:12:47 +00:00
ed
fc5bfe81a0 add hotkey '?' for hotkeys listing 2022-10-30 16:05:14 +00:00
ed
aae14de796 mouse3 docs in the navpane 2022-10-30 13:13:58 +00:00
ed
54e1c8d261 remove 697 GiB upload filesize limit 2022-10-30 12:51:20 +00:00
ed
a0cc4ca4b7 up2k-cli: enable mt if chrome 107 or later 2022-10-29 22:57:59 +00:00
ed
2701108c5b up2k-ui: suggest potato to avoid firefox-bug 1790500 2022-10-29 22:46:13 +00:00
ed
73bd2df2c6 more metadata-parser debug options 2022-10-29 21:59:59 +00:00
ed
0063021012 mtp-deps: add fedora support 2022-10-29 21:38:08 +00:00
ed
1c3e4750b3 better android howto 2022-10-29 20:46:22 +00:00
ed
edad3246e0 make pylance happier 2022-10-29 20:40:25 +00:00
ed
3411b0993f fix msg-to-log 2022-10-26 02:35:32 +02:00
ed
097b5609dc support grapheneos 2022-10-26 02:35:10 +02:00
ed
a42af7655e fix relative link 2022-10-26 02:32:24 +02:00
ed
69f78b86af cleanup 2022-10-25 01:23:41 +02:00
ed
5f60c509c6 smb: add better-than-nothing permission checks 2022-10-24 21:16:57 +02:00
ed
75e5e53276 readme refactor 2022-10-24 18:48:12 +02:00
ed
4b2b4ed52d smb: fix file rename 2022-10-24 16:08:02 +02:00
ed
fb21bfd6d6 update localmount / rclone docs 2022-10-24 15:48:34 +02:00
ed
f14369e038 webdav: mkdir semantics 2022-10-24 14:09:09 +02:00
ed
ff04b72f62 smb: add mkdir/copy/rename/delete 2022-10-24 14:08:32 +02:00
ed
4535a81617 smb: add up2k-indexing on write 2022-10-24 13:44:19 +02:00
ed
cce57b700b fix range-request on empty files 2022-10-24 03:26:32 +02:00
ed
5b6194d131 stop win10-webdav from flooding the server 2022-10-24 02:33:23 +02:00
ed
2701238cea reply raw markdown unless ?v 2022-10-24 02:10:07 +02:00
ed
835f8a20e6 default-enable webdav 2022-10-23 23:37:32 +02:00
ed
f3a501db30 add SMB/CIFS server 2022-10-23 23:08:00 +02:00
ed
4bcd30da6b cleaner daemon instancing 2022-10-23 12:05:44 +02:00
ed
947dbb6f8a webdav mimetypes based on file extensions (for gnome) 2022-10-22 02:08:19 +02:00
ed
1c2fedd2bf let webdav replace empty files when sufficiently safe 2022-10-22 01:31:18 +02:00
ed
32e826efbc catch and discard macos metadata files 2022-10-22 01:15:54 +02:00
ed
138b932c6a add webdav move/delete 2022-10-22 00:04:51 +02:00
ed
6da2f53aad avoid macos tmpfiles-cleaner 2022-10-21 18:49:25 +02:00
ed
20eeacaac3 add webdav write support + fix http 200/201 2022-10-21 18:47:48 +02:00
ed
81d896be9f webdav notes 2022-10-19 15:52:19 +02:00
ed
c003dfab03 unbold ansi grays 2022-10-19 15:30:17 +02:00
ed
20c6b82bec replace magic numbers with errno.* 2022-10-19 15:21:48 +02:00
ed
046b494b53 winpe support + windows webdav stuff 2022-10-19 00:06:48 +02:00
ed
f0e98d6e0d win7 webdav workarounds 2022-10-18 20:52:12 +02:00
ed
fe57321853 correct 401/403 usage for webdav 2022-10-18 20:29:06 +02:00
ed
8510804e57 initial webdav support 2022-10-18 19:36:52 +02:00
ed
acd32abac5 v1.4.6 2022-10-13 21:37:05 +02:00
ed
2b47c96cf2 move licenses into module proper 2022-10-13 21:14:42 +02:00
ed
1027378bda language + cleanup 2022-10-13 20:43:30 +02:00
ed
e979d30659 audioplayer: transcode wav to opus 2022-10-13 20:26:43 +02:00
ed
574db704cc packaging 2022-10-13 20:24:45 +02:00
ed
fdb969ea89 explain why extractall is safe to use 2022-10-11 17:44:38 +02:00
ed
08977854b3 a e s t h e t i c 2022-10-09 22:56:27 +02:00
ed
cecac64b68 v1.4.5 2022-10-09 11:19:40 +02:00
ed
7dabdade2a v1.4.4 2022-10-09 01:08:31 +02:00
ed
e788f098e2 dont fallback to icons for waveforms 2022-10-09 00:38:56 +02:00
ed
69406d4344 readme + better window title 2022-10-09 00:04:02 +02:00
ed
d16dd26c65 misc 2022-10-08 21:09:04 +02:00
ed
12219c1bea more fun with symlinks 2022-10-08 21:08:51 +02:00
ed
118bdcc26e 120x faster folder moves/renames 2022-10-08 19:11:03 +02:00
ed
78fa96f0f4 add unpost sanchk 2022-10-08 18:23:41 +02:00
ed
c7deb63a04 actually thats just an android-firefox bug 2022-10-08 17:52:29 +02:00
ed
4f811eb9e9 hmac cache limit + android ux:
onscroll doesnt trigger so files dont load in
2022-10-08 17:46:04 +02:00
ed
0b265bd673 naming is hard 2022-10-08 16:34:04 +02:00
ed
ee67fabbeb update readme 2022-10-08 14:25:13 +02:00
ed
b213de7e62 update readme + tests 2022-10-08 14:18:52 +02:00
ed
7c01505750 phone ux 2022-10-08 14:11:25 +02:00
ed
ae28dfd020 tweaks 2022-10-08 02:05:15 +02:00
ed
2a5a4e785f include filekeys in unpost list 2022-10-08 01:18:27 +02:00
ed
d8bddede6a new permission G returns filekey on write-only uploads 2022-10-08 01:17:41 +02:00
ed
b8a93e74bf fix default upload expiration + ux 2022-10-07 21:38:01 +02:00
ed
e60ec94d35 draw qr-code as ansi colors 2022-10-07 01:04:23 +02:00
ed
84af5fd0a3 scale qr-code to fit console size 2022-10-07 00:59:44 +02:00
ed
dbb3edec77 print qr-code on startup 2022-10-07 00:47:26 +02:00
ed
d284b46a3e rice 2022-10-06 23:40:06 +02:00
ed
9fcb4d222b reserve names to avoid ie11 pollution 2022-10-06 01:33:34 +02:00
ed
d0bb1ad141 v1.4.3 2022-09-26 22:37:54 +02:00
ed
b299aaed93 fix some cases of deleted files not being forgotten 2022-09-26 22:19:46 +02:00
ed
abb3224cc5 option to save a copy of corrupted uploads 2022-09-26 22:01:49 +02:00
ed
1c66d06702 cleanup versionchecks 2022-09-25 21:31:47 +02:00
ed
e00e80ae39 v1.4.2 2022-09-25 14:36:10 +02:00
ed
4f4f106c48 add ability to forget uploads by deleting the files 2022-09-25 14:24:01 +02:00
ed
a286cc9d55 fix printing big unicode messages 2022-09-25 14:04:35 +02:00
ed
53bb1c719b fix huge-filename trunc on ubuntu-20.04 zfs 2022-09-25 14:00:11 +02:00
ed
98d5aa17e2 notes on dealing with bitflips 2022-09-24 22:41:00 +02:00
ed
aaaa80e4b8 v1.4.1 2022-09-24 14:45:50 +02:00
ed
e70e926a40 support up2k uploads from old browsertabs 2022-09-24 14:35:51 +02:00
ed
e80c1f6d59 mention how ffmpeg was built 2022-09-24 00:05:47 +02:00
ed
24de360325 v1.4.0 2022-09-23 22:53:51 +02:00
ed
e0039bc1e6 syntax-hl: elixir, glsl, matlab, moonscript, nim, zig 2022-09-23 22:32:40 +02:00
ed
ae5c4a0109 update webdeps + isort + readme 2022-09-23 22:32:04 +02:00
ed
1d367a0da0 cleanup 2022-09-23 20:37:37 +02:00
ed
d285f7ee4a macos-safari support 2022-09-23 19:36:07 +02:00
ed
37c84021a2 up2k: folder-upload without drag/drop 2022-09-22 21:58:04 +02:00
ed
8ee9de4291 up2k: add separate sfx toggle 2022-09-22 20:12:25 +02:00
ed
249b63453b good api 2022-09-22 19:20:33 +02:00
ed
1c0017d763 up2k: upload-complete notification 2022-09-21 23:39:36 +02:00
ed
df51e23639 playing next folder makes no sense in search results 2022-09-21 22:30:31 +02:00
ed
32e71a43b8 reinvent fail2ban 2022-09-21 22:27:20 +02:00
ed
47a1e6ddfa avoid windows funk 2022-09-21 08:25:44 +02:00
ed
c5f41457bb add ffmpeg build notes 2022-09-21 08:17:26 +02:00
ed
f1e0c44bdd better autocorrect for poor ffmpeg builds 2022-09-20 23:25:35 +02:00
ed
9d2e390b6a shrink the exe + add errorhandler 2022-09-20 21:40:56 +02:00
ed
75a58b435d reject anon ftp if anon has no read/write 2022-09-20 21:40:21 +02:00
ed
f5474d34ac embed licenses 2022-09-20 20:11:38 +02:00
ed
c962d2544f ux 2022-09-20 20:07:02 +02:00
ed
0b87a4a810 allow setting lifetimes from up2k ui 2022-09-19 23:49:07 +02:00
ed
1882afb8b6 whoops 2022-09-19 02:10:14 +02:00
ed
2270c8737a and audio seekpoints got floored to ints 2022-09-19 01:30:59 +02:00
ed
d6794955a4 playback position covered up the waveform 2022-09-19 01:23:40 +02:00
ed
f5520f45ef add pyinstaller 2022-09-19 00:59:54 +02:00
ed
9401b5ae13 add filetype detection for nameless uploads 2022-09-18 17:30:57 +02:00
ed
df64a62a03 patch popen on windows-python <3.8 2022-09-18 15:09:41 +02:00
ed
09cea66aa8 add ability to set lifetime per-file during upload 2022-09-18 13:12:38 +02:00
ed
13cc33e0a5 support random filenames in bup too 2022-09-18 01:03:38 +02:00
ed
ab36c8c9de fix tests 2022-09-18 00:16:40 +02:00
ed
f85d4ce82f support alpine's ffmpeg 2022-09-17 23:56:32 +02:00
ed
6bec4c28ba add waveform seekbar 2022-09-17 23:40:37 +02:00
ed
fad1449259 drop the redundant request for folders on navigation 2022-09-17 21:39:44 +02:00
ed
86b3b57137 smaller optimizations 2022-09-17 20:39:08 +02:00
ed
b235037dd3 5x faster rendering of huge tagsets 2022-09-17 20:17:24 +02:00
ed
3108139d51 30% faster tags listing 2022-09-17 19:36:42 +02:00
ed
2ae99ecfa0 new upload modifiers:
* terse upload responser
* randomize filenames
2022-09-17 14:48:53 +02:00
ed
e8ab53c270 fix read-only search positioning 2022-09-17 13:45:41 +02:00
ed
5e9bc1127d fix windows symlink creation 2022-09-17 13:27:54 +02:00
ed
415e61c3c9 prevent blanks from skipping ahead in the queue 2022-09-16 23:51:55 +02:00
ed
5152f37ec8 fix sfx keepalive across unix users 2022-09-16 22:19:59 +02:00
ed
0dbeb010cf fix symlinked filekeys 2022-09-16 21:41:17 +02:00
ed
17c465bed7 lazyload big folders; closes #11 2022-09-15 23:43:40 +02:00
ed
add04478e5 multiprocessing: fix listening-socket config 2022-09-15 22:25:11 +02:00
ed
6db72d7166 optimizations / cleanup 2022-09-15 01:18:19 +02:00
ed
868103a9c5 more flexible --stackmon 2022-09-14 02:06:34 +02:00
ed
0f37718671 improve error messages 2022-09-14 01:56:16 +02:00
icxes
fa1445df86 align grid items to left if there's not enough to fill a row 2022-09-12 00:58:54 +02:00
icxes
a783e7071e add small margin to grid 2022-09-12 00:58:54 +02:00
icxes
a9919df5af change justify-content depending on whether sidebar is open 2022-09-12 00:58:54 +02:00
icxes
b0af31ac35 fix indentation? 2022-09-12 00:58:54 +02:00
icxes
c4c964a685 simplify style and make gaps equal size 2022-09-12 00:58:54 +02:00
icxes
348ec71398 make grid items scale properly at different zoom levels 2022-09-12 00:58:54 +02:00
exci
a257ccc8b3 try using grids for the.. grids 2022-09-12 00:58:54 +02:00
ed
fcc4296040 mention the upcoming bugfix in chrome 2022-09-11 22:31:36 +02:00
ed
1684d05d49 dont crash chrome with too many unique SVGs 2022-09-11 11:47:26 +02:00
ed
0006f933a2 hmac uploader-ip when avoiding filename collisions 2022-09-11 08:27:45 +02:00
ed
0484f97c9c stop writing upload-summary textfiles,
can be reenabled with --write-uplog
2022-09-10 22:07:10 +02:00
ed
e430b2567a add pyoxidizer (windows-only) 2022-09-10 17:33:04 +02:00
ed
fbc8ee15da make firefox stop complaining 2022-09-08 19:22:51 +02:00
ed
68a9c05947 load eq ui early 2022-09-08 18:47:30 +02:00
ed
0a81aba899 fix wrong ETA after failed handshakes +
tooltip-hint positioning on bottom-most elements
2022-09-07 23:34:43 +02:00
ed
d2ae822e15 more socket cleanup fiddling 2022-09-07 23:06:12 +02:00
ed
fac4b08526 firefox may forget FDs during upload; warn about it 2022-09-07 23:03:48 +02:00
ed
3a7b43c663 dodge firefox race (thx exci) 2022-09-07 21:27:36 +02:00
ed
8fcb2d1554 defer actx until needed (audioplayer, uploads) and
try to be less reliant on the actx speedhack for upload performance
2022-09-07 21:08:09 +02:00
ed
590c763659 add unforgetti beam 2022-09-07 08:09:32 +02:00
ed
11d1267f8c option to keep files in index when deleted 2022-09-07 01:07:21 +02:00
ed
8f5bae95ce fix visual glitches in upload ui 2022-09-07 00:38:19 +02:00
ed
e6b12ef14c hide warnings when they are remedied 2022-09-07 00:29:26 +02:00
ed
b65674618b fix ui bug on upload-queues >= 1 TiB large 2022-09-06 23:24:58 +02:00
ed
20dca2bea5 mtp: add guestbook reader 2022-09-05 20:23:59 +02:00
ed
059e93cdcf u2cli: fix py3.5 support + better deps warning 2022-09-05 18:24:18 +02:00
ed
635ab25013 up2k.js: defer worker startup until needed 2022-09-05 00:55:52 +02:00
ed
995cd10df8 bump timeouts for zfs / bursty filesystems 2022-09-04 21:21:54 +02:00
ed
50f3820a6d downgrade severity of some transient errors 2022-09-04 12:53:49 +02:00
ed
617f3ea861 up2k-hook-ytid: discover related files in subfolders 2022-09-04 12:20:40 +02:00
ed
788db47b95 option to let mtp's keep stdout/stderr 2022-09-04 01:42:28 +02:00
ed
5fa8aaabb9 up2k-hook-ytid: comment-field example 2022-09-04 00:06:42 +02:00
ed
89d1af7f33 this actually serves a purpose but please dont ask 2022-09-03 20:19:16 +02:00
ed
799cf27c5d restore .bin-suffix for nameless PUT/POSTs
disappeared in v1.0.11
2022-09-03 19:59:59 +02:00
ed
c930d8f773 add mtp debug mode 2022-09-03 19:58:10 +02:00
ed
a7f921abb9 up2k-hook-ytid: support tiny files 2022-09-03 15:08:08 +02:00
ed
bc6234e032 parallel socket shutdown 2022-08-31 08:38:34 +02:00
ed
558bfa4e1e siocoutq-based shutdown 2022-08-31 01:16:09 +02:00
ed
5d19f23372 accurate num.cores detection 2022-08-29 19:24:48 +02:00
ed
27f08cdbfa better isNaN + fade + fix preload seek:
* use Number.isFinite or shim it, rejecting strings
* fade-in/out was too quick on volumes < 100%
* fades (especially -out) was too slow on chrome
* seek to start if playing into the previously played file
* and let π raise if it wants to
2022-08-29 19:23:23 +02:00
ed
993213e2c0 mtp/vidchk: support stuff like rag-prep 2022-08-24 23:25:03 +02:00
ed
49470c05fa well that was dumb 2022-08-23 00:03:04 +02:00
ed
ee0a060b79 mention the chrome gc bugs 2022-08-20 09:25:29 +02:00
ed
500e3157b9 v1.3.16 2022-08-18 19:24:06 +02:00
ed
eba86b1d23 default-disable mt on https-desktop-chrome 2022-08-18 19:01:33 +02:00
ed
b69a563fc2 gc massage 2022-08-18 18:03:33 +02:00
ed
a900c36395 v1.3.15 2022-08-18 01:02:19 +02:00
ed
1d9b324d3e explain w/a wasm leaks in workers (chrome bug) 2022-08-18 01:02:06 +02:00
ed
539e7b8efe help chrome gc by reusing one filereader 2022-08-18 00:05:32 +02:00
ed
50a477ee47 up2k-hook-ytid: upload into subdirs by id 2022-08-15 21:52:41 +02:00
ed
7000123a8b v1.3.14 2022-08-15 20:25:31 +02:00
ed
d48a7d2398 provide tagparsers with uploader info 2022-08-15 20:23:17 +02:00
ed
389a00ce59 v1.3.13 2022-08-15 19:11:21 +02:00
ed
7a460de3c2 windows db fix 2022-08-15 18:01:28 +02:00
ed
8ea1f4a751 idx multimedia format/container type 2022-08-15 17:56:13 +02:00
ed
1c69ccc6cd v1.3.12 2022-08-13 00:58:49 +02:00
ed
84b5bbd3b6 u2cli: bail from recursive symlinks + verbose errors 2022-08-13 00:28:08 +02:00
ed
9ccd327298 add directory hashing (boots ~3x faster) 2022-08-12 23:17:18 +02:00
ed
11df36f3cf add option to exit after scanning volumes 2022-08-12 21:20:13 +02:00
ed
f62dd0e3cc support fips-cpython and maybe make-sfx on macos 2022-08-12 16:36:20 +02:00
ed
ad18b6e15e stop reindexing empty files on startup 2022-08-12 16:31:36 +02:00
ed
c00b80ca29 v1.3.11 2022-08-10 23:35:21 +02:00
ed
92ed4ba3f8 parallelize python hashing too 2022-08-10 23:12:01 +02:00
ed
7de9775dd9 lol android 2022-08-10 20:35:12 +02:00
ed
5ce9060e5c up2k.js: do hashing in web-workers 2022-08-10 01:09:54 +02:00
ed
f727d5cb5a new cloudflare memes, thx nh 2022-08-09 09:00:22 +02:00
ed
4735fb1ebb u2cli: better msg on bad tls certs 2022-08-09 00:11:34 +02:00
ed
c7d05cc13d up2k-hook-ytid: log discovered IDs + support audio rips 2022-08-05 19:26:24 +02:00
ed
51c152ff4a indicate sqlite thread-safety + some cleanup 2022-08-05 01:20:16 +02:00
ed
eeed2a840c v1.3.10 2022-08-04 01:40:14 +02:00
ed
4aaa111925 v1.3.9 2022-08-04 00:39:37 +02:00
ed
e31248f018 include version info on startup and in crash dumps 2022-08-04 00:11:52 +02:00
ed
8b4cf022f2 bbox: tweak end-of-gallery animation 2022-08-03 22:56:51 +02:00
ed
4e7455268a tag-scanner perf 2022-08-03 22:33:20 +02:00
ed
680f8ae814 add xdev/xvol indexing guards 2022-08-03 22:20:28 +02:00
ed
90555a4cea clean-shutdown while hashing huge files 2022-08-03 21:06:10 +02:00
ed
56a62db591 force-exit by hammering ctrl-c 2022-08-03 20:58:23 +02:00
ed
cf51997680 fix make-sfx.sh on windows/msys2 2022-08-03 20:01:54 +02:00
ed
f05cc18d61 add missing polyfill 2022-08-03 19:42:42 +02:00
ed
5384c2e0f5 reentrant cleanup 2022-08-02 20:56:05 +02:00
ed
9bfbf80a0e ui: fix navpane covering files on horizontal scroll 2022-08-02 20:48:26 +02:00
ed
f874d7754f ui: toggle sorting folders before files (default-on) 2022-08-02 20:47:17 +02:00
ed
a669f79480 windows upload perf (fat32, smb) 2022-08-02 20:39:51 +02:00
ed
1c3894743a fix filekeys inside symlinked volumes 2022-08-02 20:26:51 +02:00
ed
75cdf17df4 cache sparsefile-support on windows too 2022-08-02 06:58:25 +02:00
ed
de7dd1e60a more visible upload errors on mobile 2022-08-02 06:17:13 +02:00
ed
0ee574a718 forget uploads that failed to initialize 2022-08-02 06:15:18 +02:00
ed
faac894706 oh 2022-07-29 00:13:18 +02:00
ed
dac2fad48e v1.3.8 2022-07-27 16:07:26 +02:00
ed
77f624b01e improve shumantime + use it everywhere 2022-07-27 15:07:04 +02:00
ed
e24ffebfc8 indicate write-activity on splashpage 2022-07-27 14:53:15 +02:00
ed
70d07d1609 perf 2022-07-27 14:01:30 +02:00
ed
bfb3303d87 include client total ETA in upload logs 2022-07-27 12:07:51 +02:00
ed
660705a436 defer volume reindexing on db activity 2022-07-27 11:48:47 +02:00
ed
74a3f97671 cleanup + bump deps 2022-07-27 00:15:49 +02:00
ed
b3e35bb494 async lsof w/ timeout 2022-07-26 22:38:13 +02:00
ed
76adac7c72 up2k-hook-ytid: add mp4/webm/mkv metadata scanner 2022-07-26 22:09:18 +02:00
ed
5dc75ebb67 async e2ts / e2v + forget deleted shadowed 2022-07-26 12:47:40 +02:00
ed
d686ce12b6 lsof db on stuck transaction 2022-07-25 02:07:59 +02:00
ed
d3c40a423e mutagen: support nullduration tags 2022-07-25 01:21:34 +02:00
ed
2fb1e6dab8 mute exception on zip abort 2022-07-25 01:20:38 +02:00
ed
10430b347f fix dumb prisonparty bug 2022-07-22 20:49:35 +02:00
ed
e0e3f6ac3e up2k-hook-ytid: add override 2022-07-22 10:47:10 +02:00
ed
c694cbffdc a11y: improve skip-to-files 2022-07-20 23:44:57 +02:00
ed
bdd0e5d771 a11y: enter = onclick 2022-07-20 23:32:02 +02:00
ed
aa98e427f0 audio-eq: add crossfeed 2022-07-20 01:54:59 +02:00
ed
daa6f4c94c add video hotkeys for digit-seeking 2022-07-17 23:45:02 +02:00
ed
4a76663fb2 ensure free disk space 2022-07-17 22:33:08 +02:00
ed
cebda5028a v1.3.7 2022-07-16 20:48:23 +02:00
ed
3fa377a580 sqlite diag 2022-07-16 20:43:26 +02:00
ed
a11c1005a8 v1.3.6 2022-07-16 03:58:58 +02:00
ed
4a6aea9328 hopefully got this right 2022-07-16 02:24:53 +02:00
ed
4ca041e93e improve autopotato accuracy 2022-07-16 02:23:50 +02:00
ed
52a866a405 batch progress writes 2022-07-16 02:12:56 +02:00
ed
8b6bd0e6ac rescue some exceptions from the promise maelstroms 2022-07-15 23:42:37 +02:00
ed
780fc4639a bbox: chrome doesnt override video onclick 2022-07-15 22:36:35 +02:00
ed
3692fc9d83 bbox: doubletap pic for fullscreen 2022-07-15 22:29:44 +02:00
ed
c2a0b1b4c6 autopotato 2022-07-15 02:39:32 +02:00
ed
21bbdb5419 fix audio-eq on recent chromes 2022-07-15 02:07:48 +02:00
ed
aa1c08962c golf 2022-07-15 02:07:13 +02:00
ed
8a5d0399dd sfx: dont hang supervisors 2022-07-15 02:04:00 +02:00
ed
f2cd0b0c4a sfx: avoid name collisions across reboots 2022-07-15 02:03:41 +02:00
ed
c2b66bbe73 add potato mode 2022-07-14 02:33:35 +02:00
ed
48b957f1d5 add -e2v (file integrity checker) 2022-07-13 00:48:39 +02:00
ed
3683984c8d abort volume indexing on ^C 2022-07-12 21:46:07 +02:00
ed
a3431512d8 push queue/status info to server 2022-07-12 21:22:02 +02:00
ed
d832b787e7 upload smallest-file-first by default 2022-07-12 20:48:38 +02:00
ed
6f75b02723 misc 2022-07-12 03:16:30 +02:00
ed
b8241710bd md-editor fixes 2022-07-12 02:53:33 +02:00
ed
d638404b6a better runahead strategy for 100 GiB+ files 2022-07-12 02:30:49 +02:00
ed
9362ca3ed9 py2 fixes 2022-07-11 23:53:18 +02:00
ed
d1a03c6d17 zerobyte semantics 2022-07-11 23:17:33 +02:00
ed
c6c31702c2 cheaper file deletion 2022-07-11 01:50:18 +02:00
ed
bd2d88c96e add another up2k-hook example 2022-07-11 00:52:59 +02:00
ed
76b1857e4e add support for up2k hooks 2022-07-09 14:02:35 +02:00
ed
095bd17d10 mtp/vidchk: grab some frames at the start too 2022-07-09 13:10:00 +02:00
ed
204bfac3fa mtp/vidchk: write ffprobe metadata to file 2022-07-09 04:33:19 +02:00
ed
ac49b0ca93 mtp: add rclone uploader 2022-07-08 23:47:27 +02:00
ed
c5b04f6fef mtp daisychaining 2022-07-08 22:29:05 +02:00
ed
5c58fda46d only clean thumbs if there are thumbs to clean 2022-07-08 21:13:10 +02:00
ed
062730c70c cleanup 2022-07-06 11:12:36 +02:00
ed
cade1990ce v1.3.5 2022-07-06 02:29:11 +02:00
ed
59b6e61816 build fstab from relabels when mtab is unreadable 2022-07-06 02:28:34 +02:00
ed
daff7ff158 v1.3.4 2022-07-06 00:12:10 +02:00
ed
0862860961 misc cleanup 2022-07-06 00:00:56 +02:00
ed
1cb24045a0 dont thumb empty files 2022-07-05 23:45:47 +02:00
ed
622358b172 flag to control mtp timeout kill behavior 2022-07-05 23:38:49 +02:00
ed
7998884a9d adopt the osd hider 2022-07-05 23:36:44 +02:00
ed
51ddecd101 improve readme 2022-07-05 23:27:48 +02:00
ed
7a35ab1d1e bbox: video seek / loop url params 2022-07-05 20:37:05 +02:00
ed
48564ba52a bbox: add A-B video loop 2022-07-05 19:53:43 +02:00
ed
49efffd740 bbox: tap left/right side of image for prev/next 2022-07-05 19:33:09 +02:00
ed
d6ac224c8f bbox: tap to show/hide buttons 2022-07-05 19:18:21 +02:00
ed
a772b8c3f2 bbox: add fullscreen for images too 2022-07-05 19:06:02 +02:00
ed
b580953dcd bbox: fix crash on swipe during close 2022-07-05 18:49:52 +02:00
ed
d86653c763 ux 2022-07-05 00:13:08 +02:00
ed
dded4fca76 option to specify favicon + default-enable it 2022-07-05 00:06:22 +02:00
ed
36365ffa6b explain the donut 2022-07-04 22:17:37 +02:00
ed
0f9aeeaa27 bump codemirror to 5.65.6 2022-07-04 22:15:52 +02:00
ed
d8ebcd0ef7 lol dpi 2022-07-04 22:13:28 +02:00
ed
6e445487b1 satisfy cloudflare DDoS protection 2022-07-03 16:04:28 +02:00
ed
6605e461c7 improve mtp section 2022-07-03 14:23:56 +02:00
ed
40ce4e2275 cleanup 2022-07-03 13:55:48 +02:00
ed
8fef9e363e recursive kill mtp on timeout 2022-07-03 04:57:15 +02:00
ed
4792c2770d fix a spin 2022-07-03 02:39:15 +02:00
ed
87bb49da36 new mtp: video integrity checker 2022-07-03 01:50:38 +02:00
ed
1c0071d9ce perf 2022-07-03 01:40:30 +02:00
ed
efded35c2e ffmpeg saying the fps is 1/0 yeah okay 2022-07-02 00:39:46 +02:00
ed
1d74240b9a ux: hide uploads table until something happens 2022-07-01 09:16:23 +02:00
ed
098184ff7b add write-only up2k ui simplifier 2022-07-01 00:55:36 +02:00
ed
4083533916 vt100 listing: reset color at eof 2022-06-29 22:41:51 +02:00
ed
feb1acd43a v1.3.3 2022-06-27 22:57:05 +02:00
ed
a9591db734 cleanup 2022-06-27 22:56:29 +02:00
ed
9ebf148cbe support android9 sdcardfs on sdcard 2022-06-27 22:15:35 +02:00
ed
a473e5e19a always include custom css/js 2022-06-27 17:24:30 +02:00
ed
5d3034c231 detect sparse support from st_blocks 2022-06-23 18:23:42 +02:00
ed
c3a895af64 android sdcardfs can be fat32 2022-06-23 16:27:30 +02:00
ed
cea5aecbf2 v1.3.2 2022-06-20 01:31:29 +02:00
ed
0e61e70670 audioplayer continues to next folder by default 2022-06-20 00:20:13 +02:00
ed
1e333c0939 fix doc traversal 2022-06-19 23:32:36 +02:00
ed
917b6ec03c naming 2022-06-19 22:58:20 +02:00
ed
fe67c52ead configurable list of sparse-supporting filesystems +
close nonsparse files after each write to force flush
2022-06-19 22:38:52 +02:00
ed
909c7bee3e ignore md plugin errors 2022-06-19 20:28:45 +02:00
ed
27ca54d138 md: ol appeared as ul 2022-06-19 19:05:41 +02:00
ed
2147c3a646 run markdown plugins in directory listings 2022-06-19 18:17:22 +02:00
ed
a99120116f ux: breadcrumb ctrl-click 2022-06-19 17:51:03 +02:00
ed
802efeaff2 dont let tags imply subdirectories when renaming 2022-06-19 16:06:39 +02:00
ed
9ad3af1ef6 misc tweaks 2022-06-19 16:05:48 +02:00
ed
715727b811 add changelog 2022-06-17 15:33:57 +02:00
ed
c6eaa7b836 aight good to know 2022-06-17 00:37:56 +02:00
ed
c2fceea2a5 v1.3.1 2022-06-16 21:56:12 +02:00
ed
190e11f7ea update deps + misc 2022-06-16 21:43:40 +02:00
ed
ad7413a5ff add .PARTIAL suffix to bup uploads too +
aggressive limits checking
2022-06-16 21:00:41 +02:00
ed
903b9e627a ux snappiness + keepalive on http-1.0 2022-06-16 20:33:09 +02:00
ed
c5c1e96cf8 ux: button to reset hidden columns 2022-06-16 19:06:28 +02:00
ed
62fbb04c9d allow moving files between filesystems 2022-06-16 18:46:50 +02:00
ed
728dc62d0b optimize nonsparse uploads (fat32, exfat, hpfs) 2022-06-16 17:51:42 +02:00
ed
2dfe1b1c6b add themes: hacker, hi-con 2022-06-16 12:21:21 +02:00
ed
35d4a1a6af ux: delay loading animation + focus outlines + explain ng 2022-06-16 11:02:05 +02:00
ed
eb3fa5aa6b add safety profiles + improve helptext + speed 2022-06-16 10:21:44 +02:00
ed
438384425a add types, isort, errorhandling 2022-06-16 01:07:15 +02:00
ed
0b6f102436 fix multiprocessing ftpd 2022-06-12 16:37:56 +02:00
ed
c9b7ec72d8 add hotkey Y to download current song / vid / pic 2022-06-09 17:23:11 +02:00
ed
256c7f1789 add option to see errors from mtp parsers 2022-06-09 14:46:35 +02:00
ed
4e5a323c62 more cleanup 2022-06-08 01:05:35 +02:00
ed
f4a3bbd237 fix ansify prepending bracket to all logfiles 2022-06-07 23:45:54 +02:00
ed
fe73f2d579 cleanup 2022-06-07 23:08:43 +02:00
ed
f79fcc7073 discover local ip under termux 2022-06-07 23:03:16 +02:00
ed
4c4b3790c7 fix read-spin on d/c during json post + errorhandling 2022-06-07 19:02:52 +02:00
ed
bd60b464bb fix misleading log-msg 2022-06-07 14:12:55 +02:00
ed
6bce852765 ux: treepar positioning 2022-06-06 22:05:13 +02:00
ed
3b19a5a59d improve a11y jumpers 2022-05-25 20:31:12 +02:00
ed
f024583011 add a11y jumpers 2022-05-24 09:09:54 +02:00
ed
1111baacb2 v1.3.0 2022-05-22 17:02:38 +02:00
ed
1b9c913efb update deps (marked, codemirror, prism) 2022-05-22 16:49:18 +02:00
ed
3524c36e1b tl 2022-05-22 16:04:10 +02:00
ed
cf87cea9f8 ux, tl 2022-05-21 11:32:25 +02:00
ed
bfa34404b8 ux tweaks 2022-05-19 18:00:33 +02:00
ed
0aba5f35bf add confirms on colhide, bigtxt 2022-05-19 17:59:33 +02:00
ed
663bc0842a ux 2022-05-18 19:51:25 +02:00
ed
7d10c96e73 grammar 2022-05-18 19:33:20 +02:00
ed
6b2720fab0 dont switch to treeview on play into next folder 2022-05-18 19:24:47 +02:00
ed
e74ad5132a persist videoplayer prefs 2022-05-18 19:17:21 +02:00
ed
1f6f89c1fd apply default-language to splashpage 2022-05-18 19:02:36 +02:00
ed
4d55e60980 update flat-light ss 2022-05-16 19:01:32 +02:00
ed
ddaaccd5af ux tweaks 2022-05-16 18:56:53 +02:00
ed
c20b7dac3d ah whatever, still 16 years left 2022-05-15 17:23:52 +02:00
ed
1f779d5094 zip: add ntfs and unix extensions for utc time 2022-05-15 16:13:49 +02:00
ed
715401ca8e fix timezone in search, zipfiles, fuse 2022-05-15 13:51:44 +02:00
ed
e7cd922d8b translate splashpage and search too 2022-05-15 13:20:52 +02:00
ed
187feee0c1 add norwegian translation 2022-05-14 23:25:40 +02:00
ed
49e962a7dc dbtool: faster, add examples,
match on hashes rather than paths by default,
add no-clobber option to keep existing tags
2022-05-14 12:44:05 +02:00
ed
633ff601e5 perf + ux 2022-05-14 00:13:06 +02:00
ed
331cf37054 show loading progress for huge documents 2022-05-13 23:02:20 +02:00
ed
23e4b9002f support ?doc=mojibake 2022-05-13 18:10:55 +02:00
ed
c0de3c8053 v1.2.11 2022-05-13 17:24:50 +02:00
ed
a82a3b084a make search results unselectable 2022-05-13 17:18:19 +02:00
ed
67c298e66b don't embed huge docs (defer to ajax), closes #9 2022-05-13 17:08:17 +02:00
ed
c110ccb9ae v1.2.10 2022-05-13 01:44:00 +02:00
ed
0143380306 help the query planner 2022-05-13 01:41:39 +02:00
ed
af9000d3c8 v1.2.9 2022-05-12 23:10:54 +02:00
ed
097d798e5e steal colors from monokai 2022-05-12 23:06:37 +02:00
ed
1d9f9f221a louder 2022-05-12 20:55:37 +02:00
ed
214a367f48 be loud about segfaults and such 2022-05-12 20:26:48 +02:00
ed
2fb46551a2 avoid pointless recursion + show scan summary 2022-05-09 23:43:59 +02:00
ed
6bcf330ae0 symlink-checker: print base vpath in nonverbose mode 2022-05-09 20:17:03 +00:00
ed
2075a8b18c skip nonregular files when indexing filesystem 2022-05-09 19:56:17 +00:00
ed
1275ac6c42 start up2k indexing even if no interfaces could bind 2022-05-09 20:38:06 +02:00
ed
708f20b7af remove option to disable spa 2022-05-08 14:29:05 +02:00
ed
a2c0c708e8 focus password field if not logged in 2022-05-07 22:16:12 +02:00
ed
2f2c65d91e improve up2k error messages 2022-05-07 22:15:09 +02:00
ed
cd5fcc7ca7 fix file sel/play background on focus 2022-05-06 21:15:18 +02:00
ed
aa29e7be48 minimal support for browsers without css-variables 2022-05-03 00:52:26 +02:00
ed
93febe34b0 truncate huge ffmpeg errors 2022-05-03 00:32:00 +02:00
ed
f086e6d3c1 best-effort recovery when chrome desyncs the mediaSession 2022-05-02 19:08:37 +02:00
ed
22e51e1c96 compensate for play/pause fades by rewinding a bit 2022-05-02 19:07:16 +02:00
ed
63a5336f31 change modal ok/cancel focus with left/right keys 2022-05-02 19:06:51 +02:00
ed
bfc6c53cc5 ux 2022-05-02 19:06:08 +02:00
ed
236017f310 better dropzones on small screens 2022-05-02 01:08:31 +02:00
ed
0a1d9b4dfd nevermind, not reliable when rproxied 2022-05-01 22:35:34 +02:00
ed
b50d090946 add logout on inactivity + related errorhandling 2022-05-01 22:12:25 +02:00
ed
00b5db52cf notes 2022-05-01 12:02:27 +02:00
ed
24cb30e2c5 support login from ie4 / win3.11 2022-05-01 11:42:19 +02:00
ed
4549145ab5 fix filekeys in basic-html browser 2022-05-01 11:29:51 +02:00
ed
67b0217754 cleanup + readme 2022-04-30 23:37:27 +02:00
ed
ccae9efdf0 safer systemd example (unprivileged user + NAT for port 80 / 443) 2022-04-30 23:28:51 +02:00
ed
59d596b222 add service to autogenerate TLS certificates 2022-04-30 22:54:35 +02:00
ed
4878eb2c45 support symlinks as volume root 2022-04-30 20:26:26 +02:00
ed
7755392f57 redirect to webroot after login 2022-04-30 18:15:09 +02:00
ed
dc2ea20959 v1.2.8 2022-04-30 02:16:34 +02:00
ed
8eaea2bd17 ux 2022-04-30 00:37:31 +02:00
ed
58e559918f fix dynamic tree sizing 2022-04-30 00:04:06 +02:00
ed
f38a3fca5b case-insensitive cover check 2022-04-29 23:39:16 +02:00
ed
1ea145b384 wow when did that break 2022-04-29 23:37:38 +02:00
ed
0d9567575a avoid hashing busy uploads during rescan 2022-04-29 23:16:23 +02:00
ed
e82f176289 fix deadlock on rescan during upload 2022-04-29 23:14:51 +02:00
ed
d4b51c040e doc + ux 2022-04-29 23:13:37 +02:00
ed
125d0efbd8 good stuff 2022-04-29 02:06:56 +02:00
ed
3215afc504 immediately search on enter key 2022-04-28 22:53:37 +02:00
ed
c73ff3ce1b avoid sqlite deadlock on windows 2022-04-28 22:46:53 +02:00
ed
f9c159a051 add option to force up2k turbo + hide warning 2022-04-28 21:57:37 +02:00
ed
2ab1325c90 add option to load more search results 2022-04-28 21:55:01 +02:00
ed
5b0f7ff506 perfect 2022-04-28 10:36:56 +02:00
ed
9269bc84f2 skip more stuff windows doesn't like 2022-04-28 10:31:10 +02:00
ed
4e8b651e18 too much effort into this joke 2022-04-28 10:29:54 +02:00
ed
65b4f79534 add themes "vice" and "hot dog stand" 2022-04-27 22:33:01 +02:00
ed
5dd43dbc45 ignore bugs in chrome v102 2022-04-27 22:32:11 +02:00
ed
5f73074c7e fix audio playback on first visit 2022-04-27 22:31:33 +02:00
ed
f5d6ba27b2 handle invalid headers better 2022-04-27 22:30:19 +02:00
ed
73fa70b41f fix mostly-harmless xss 2022-04-27 22:29:16 +02:00
ed
2a1cda42e7 avoid deadlocks on windows 2022-04-27 22:27:49 +02:00
ed
1bd7e31466 more theme porting 2022-04-26 00:42:00 +02:00
ed
eb49e1fb4a conditional up2k column sizes depending on card 2022-04-24 23:48:23 +02:00
ed
9838c2f0ce golf 2022-04-24 23:47:15 +02:00
ed
6041df8370 start replacing class-scopes with css variables 2022-04-24 23:46:38 +02:00
ed
2933dce3ef mtime blank uploads + helptext 2022-04-24 22:58:11 +02:00
222 changed files with 33309 additions and 6751 deletions

2
.github/pull_request_template.md vendored Normal file
View File

@@ -0,0 +1,2 @@
Please include the following text somewhere in this PR description:
This PR complies with the DCO; https://developercertificate.org/

27
.gitignore vendored
View File

@@ -5,23 +5,38 @@ __pycache__/
MANIFEST.in
MANIFEST
copyparty.egg-info/
buildenv/
build/
dist/
sfx/
py2/
.venv/
/buildenv/
/build/
/dist/
/py2/
/sfx*
/unt/
/log/
# ide
*.sublime-workspace
# winmerge
*.bak
# apple pls
.DS_Store
# derived
copyparty/res/COPYING.txt
copyparty/web/deps/
srv/
scripts/docker/i/
contrib/package/arch/pkg/
contrib/package/arch/src/
# state/logs
up.*.txt
.hist/
.hist/
scripts/docker/*.out
scripts/docker/*.err
# nix build output link
result

1
.vscode/launch.json vendored
View File

@@ -8,6 +8,7 @@
"module": "copyparty",
"console": "integratedTerminal",
"cwd": "${workspaceFolder}",
"justMyCode": false,
"args": [
//"-nw",
"-ed",

8
.vscode/launch.py vendored Normal file → Executable file
View File

@@ -1,3 +1,5 @@
#!/usr/bin/env python3
# takes arguments from launch.json
# is used by no_dbg in tasks.json
# launches 10x faster than mspython debugpy
@@ -9,15 +11,15 @@ import sys
print(sys.executable)
import json5
import shlex
import jstyleson
import subprocess as sp
with open(".vscode/launch.json", "r", encoding="utf-8") as f:
tj = f.read()
oj = jstyleson.loads(tj)
oj = json5.loads(tj)
argv = oj["configurations"][0]["args"]
try:
@@ -28,6 +30,8 @@ except:
argv = [os.path.expanduser(x) if x.startswith("~") else x for x in argv]
argv += sys.argv[1:]
if re.search(" -j ?[0-9]", " ".join(argv)):
argv = [sys.executable, "-m", "copyparty"] + argv
sp.check_call(argv)

28
.vscode/settings.json vendored
View File

@@ -23,7 +23,6 @@
"terminal.ansiBrightWhite": "#ffffff",
},
"python.testing.pytestEnabled": false,
"python.testing.nosetestsEnabled": false,
"python.testing.unittestEnabled": true,
"python.testing.unittestArgs": [
"-v",
@@ -35,17 +34,42 @@
"python.linting.pylintEnabled": true,
"python.linting.flake8Enabled": true,
"python.linting.banditEnabled": true,
"python.linting.mypyEnabled": true,
"python.linting.mypyArgs": [
"--ignore-missing-imports",
"--follow-imports=silent",
"--show-column-numbers",
"--strict"
],
"python.linting.flake8Args": [
"--max-line-length=120",
"--ignore=E722,F405,E203,W503,W293,E402",
"--ignore=E722,F405,E203,W503,W293,E402,E501,E128",
],
"python.linting.banditArgs": [
"--ignore=B104"
],
"python.linting.pylintArgs": [
"--disable=missing-module-docstring",
"--disable=missing-class-docstring",
"--disable=missing-function-docstring",
"--disable=import-outside-toplevel",
"--disable=wrong-import-position",
"--disable=raise-missing-from",
"--disable=bare-except",
"--disable=broad-except",
"--disable=invalid-name",
"--disable=line-too-long",
"--disable=consider-using-f-string"
],
// python3 -m isort --py=27 --profile=black copyparty/
"python.formatting.provider": "black",
"editor.formatOnSave": true,
"[html]": {
"editor.formatOnSave": false,
"editor.autoIndent": "keep",
},
"[css]": {
"editor.formatOnSave": false,
},
"files.associations": {
"*.makefile": "makefile"

1205
README.md

File diff suppressed because it is too large Load Diff

9
SECURITY.md Normal file
View File

@@ -0,0 +1,9 @@
# Security Policy
if you hit something extra juicy pls let me know on either of the following
* email -- `copyparty@ocv.ze` except `ze` should be `me`
* [mastodon dm](https://layer8.space/@tripflag) -- `@tripflag@layer8.space`
* [github private vulnerability report](https://github.com/9001/copyparty/security/advisories/new), wow that form is complicated
* [twitter dm](https://twitter.com/tripflag) (if im somehow not banned yet)
no bug bounties sorry! all i can offer is greetz in the release notes

View File

@@ -1,7 +1,8 @@
# [`up2k.py`](up2k.py)
* command-line up2k client [(webm)](https://ocv.me/stuff/u2cli.webm)
* file uploads, file-search, autoresume of aborted/broken uploads
* faster than browsers
* sync local folder to server
* generally faster than browsers
* if something breaks just restart it
@@ -11,7 +12,7 @@ produces a chronological list of all uploads by collecting info from up2k databa
* optional mapping from IP-addresses to nicknames
# [`copyparty-fuse.py`](copyparty-fuse.py)
# [`partyfuse.py`](partyfuse.py)
* mount a copyparty server as a local filesystem (read-only)
* **supports Windows!** -- expect `194 MiB/s` sequential read
* **supports Linux** -- expect `117 MiB/s` sequential read
@@ -30,19 +31,19 @@ also consider using [../docs/rclone.md](../docs/rclone.md) instead for 5x perfor
* install [winfsp](https://github.com/billziss-gh/winfsp/releases/latest) and [python 3](https://www.python.org/downloads/)
* [x] add python 3.x to PATH (it asks during install)
* `python -m pip install --user fusepy`
* `python ./copyparty-fuse.py n: http://192.168.1.69:3923/`
* `python ./partyfuse.py n: http://192.168.1.69:3923/`
10% faster in [msys2](https://www.msys2.org/), 700% faster if debug prints are enabled:
* `pacman -S mingw64/mingw-w64-x86_64-python{,-pip}`
* `/mingw64/bin/python3 -m pip install --user fusepy`
* `/mingw64/bin/python3 ./copyparty-fuse.py [...]`
* `/mingw64/bin/python3 ./partyfuse.py [...]`
you could replace winfsp with [dokan](https://github.com/dokan-dev/dokany/releases/latest), let me know if you [figure out how](https://github.com/dokan-dev/dokany/wiki/FUSE)
(winfsp's sshfs leaks, doesn't look like winfsp itself does, should be fine)
# [`copyparty-fuse🅱️.py`](copyparty-fuseb.py)
# [`partyfuse2.py`](partyfuse2.py)
* mount a copyparty server as a local filesystem (read-only)
* does the same thing except more correct, `samba` approves
* **supports Linux** -- expect `18 MiB/s` (wait what)
@@ -50,7 +51,7 @@ you could replace winfsp with [dokan](https://github.com/dokan-dev/dokany/releas
# [`copyparty-fuse-streaming.py`](copyparty-fuse-streaming.py)
# [`partyfuse-streaming.py`](partyfuse-streaming.py)
* pretend this doesn't exist

View File

@@ -8,7 +8,10 @@ import sqlite3
import argparse
DB_VER1 = 3
DB_VER2 = 4
DB_VER2 = 5
BY_PATH = None
NC = None
def die(msg):
@@ -57,8 +60,13 @@ def compare(n1, d1, n2, d2, verbose):
if rd.split("/", 1)[0] == ".hist":
continue
q = "select w from up where rd = ? and fn = ?"
hit = d2.execute(q, (rd, fn)).fetchone()
if BY_PATH:
q = "select w from up where rd = ? and fn = ?"
hit = d2.execute(q, (rd, fn)).fetchone()
else:
q = "select w from up where substr(w,1,16) = ? and +w = ?"
hit = d2.execute(q, (w1[:16], w1)).fetchone()
if not hit:
miss += 1
if verbose:
@@ -70,27 +78,32 @@ def compare(n1, d1, n2, d2, verbose):
n = 0
miss = {}
nmiss = 0
for w1, k, v in d1.execute("select * from mt"):
for w1s, k, v in d1.execute("select * from mt"):
n += 1
if n % 100_000 == 0:
m = f"\033[36mchecked {n:,} of {nt:,} tags in {n1} against {n2}, so far {nmiss} missing tags\033[0m"
print(m)
q = "select rd, fn from up where substr(w,1,16) = ?"
rd, fn = d1.execute(q, (w1,)).fetchone()
q = "select w, rd, fn from up where substr(w,1,16) = ?"
w1, rd, fn = d1.execute(q, (w1s,)).fetchone()
if rd.split("/", 1)[0] == ".hist":
continue
q = "select substr(w,1,16) from up where rd = ? and fn = ?"
w2 = d2.execute(q, (rd, fn)).fetchone()
if BY_PATH:
q = "select w from up where rd = ? and fn = ?"
w2 = d2.execute(q, (rd, fn)).fetchone()
else:
q = "select w from up where substr(w,1,16) = ? and +w = ?"
w2 = d2.execute(q, (w1s, w1)).fetchone()
if w2:
w2 = w2[0]
v2 = None
if w2:
v2 = d2.execute(
"select v from mt where w = ? and +k = ?", (w2, k)
"select v from mt where w = ? and +k = ?", (w2[:16], k)
).fetchone()
if v2:
v2 = v2[0]
@@ -124,7 +137,7 @@ def compare(n1, d1, n2, d2, verbose):
for k, v in sorted(miss.items()):
if v:
print(f"{n1} has {v:6} more {k:<6} tags than {n2}")
print(f"{n1} has {v:7} more {k:<7} tags than {n2}")
print(f"in total, {nmiss} missing tags in {n2}\n")
@@ -132,47 +145,75 @@ def compare(n1, d1, n2, d2, verbose):
def copy_mtp(d1, d2, tag, rm):
nt = next(d1.execute("select count(w) from mt where k = ?", (tag,)))[0]
n = 0
ndone = 0
for w1, k, v in d1.execute("select * from mt where k = ?", (tag,)):
ncopy = 0
nskip = 0
for w1s, k, v in d1.execute("select * from mt where k = ?", (tag,)):
n += 1
if n % 25_000 == 0:
m = f"\033[36m{n:,} of {nt:,} tags checked, so far {ndone} copied\033[0m"
m = f"\033[36m{n:,} of {nt:,} tags checked, so far {ncopy} copied, {nskip} skipped\033[0m"
print(m)
q = "select rd, fn from up where substr(w,1,16) = ?"
rd, fn = d1.execute(q, (w1,)).fetchone()
q = "select w, rd, fn from up where substr(w,1,16) = ?"
w1, rd, fn = d1.execute(q, (w1s,)).fetchone()
if rd.split("/", 1)[0] == ".hist":
continue
q = "select substr(w,1,16) from up where rd = ? and fn = ?"
w2 = d2.execute(q, (rd, fn)).fetchone()
if BY_PATH:
q = "select w from up where rd = ? and fn = ?"
w2 = d2.execute(q, (rd, fn)).fetchone()
else:
q = "select w from up where substr(w,1,16) = ? and +w = ?"
w2 = d2.execute(q, (w1s, w1)).fetchone()
if not w2:
continue
w2 = w2[0]
hit = d2.execute("select v from mt where w = ? and +k = ?", (w2, k)).fetchone()
w2s = w2[0][:16]
hit = d2.execute("select v from mt where w = ? and +k = ?", (w2s, k)).fetchone()
if hit:
hit = hit[0]
if hit != v:
ndone += 1
if hit is not None:
d2.execute("delete from mt where w = ? and +k = ?", (w2, k))
if NC and hit is not None:
nskip += 1
continue
d2.execute("insert into mt values (?,?,?)", (w2, k, v))
ncopy += 1
if hit is not None:
d2.execute("delete from mt where w = ? and +k = ?", (w2s, k))
d2.execute("insert into mt values (?,?,?)", (w2s, k, v))
if rm:
d2.execute("delete from mt where w = ? and +k = 't:mtp'", (w2,))
d2.execute("delete from mt where w = ? and +k = 't:mtp'", (w2s,))
d2.commit()
print(f"copied {ndone} {tag} tags over")
print(f"copied {ncopy} {tag} tags over, skipped {nskip}")
def examples():
print(
"""
# clearing the journal
./dbtool.py up2k.db
# copy tags ".bpm" and "key" from old.db to up2k.db, and remove the mtp flag from matching files (so copyparty won't run any mtps on it)
./dbtool.py -ls up2k.db
./dbtool.py -src old.db up2k.db -cmp
./dbtool.py -src old.v3 up2k.db -rm-mtp-flag -copy key
./dbtool.py -src old.v3 up2k.db -rm-mtp-flag -copy .bpm -vac
"""
)
def main():
global NC, BY_PATH
os.system("")
print()
ap = argparse.ArgumentParser()
ap.add_argument("db", help="database to work on")
ap.add_argument("-h2", action="store_true", help="show examples")
ap.add_argument("-src", metavar="DB", type=str, help="database to copy from")
ap2 = ap.add_argument_group("informational / read-only stuff")
@@ -185,11 +226,29 @@ def main():
ap2.add_argument(
"-rm-mtp-flag",
action="store_true",
help="when an mtp tag is copied over, also mark that as done, so copyparty won't run mtp on it",
help="when an mtp tag is copied over, also mark that file as done, so copyparty won't run any mtps on those files",
)
ap2.add_argument("-vac", action="store_true", help="optimize DB")
ap2 = ap.add_argument_group("behavior modifiers")
ap2.add_argument(
"-nc",
action="store_true",
help="no-clobber; don't replace/overwrite existing tags",
)
ap2.add_argument(
"-by-path",
action="store_true",
help="match files based on location rather than warks (content-hash), use this if the databases have different wark salts",
)
ar = ap.parse_args()
if ar.h2:
examples()
return
NC = ar.nc
BY_PATH = ar.by_path
for v in [ar.db, ar.src]:
if v and not os.path.exists(v):

29
bin/hooks/README.md Normal file
View File

@@ -0,0 +1,29 @@
standalone programs which are executed by copyparty when an event happens (upload, file rename, delete, ...)
these programs either take zero arguments, or a filepath (the affected file), or a json message with filepath + additional info
run copyparty with `--help-hooks` for usage details / hook type explanations (xbu/xau/xiu/xbr/xar/xbd/xad)
> **note:** in addition to event hooks (the stuff described here), copyparty has another api to run your programs/scripts while providing way more information such as audio tags / video codecs / etc and optionally daisychaining data between scripts in a processing pipeline; if that's what you want then see [mtp plugins](../mtag/) instead
# after upload
* [notify.py](notify.py) shows a desktop notification ([example](https://user-images.githubusercontent.com/241032/215335767-9c91ed24-d36e-4b6b-9766-fb95d12d163f.png))
* [notify2.py](notify2.py) uses the json API to show more context
* [image-noexif.py](image-noexif.py) removes image exif by overwriting / directly editing the uploaded file
* [discord-announce.py](discord-announce.py) announces new uploads on discord using webhooks ([example](https://user-images.githubusercontent.com/241032/215304439-1c1cb3c8-ec6f-4c17-9f27-81f969b1811a.png))
* [reject-mimetype.py](reject-mimetype.py) rejects uploads unless the mimetype is acceptable
# upload batches
these are `--xiu` hooks; unlike `xbu` and `xau` (which get executed on every single file), `xiu` hooks are given a list of recent uploads on STDIN after the server has gone idle for N seconds, reducing server load + providing more context
* [xiu.py](xiu.py) is a "minimal" example showing a list of filenames + total filesize
* [xiu-sha.py](xiu-sha.py) produces a sha512 checksum list in the volume root
# before upload
* [reject-extension.py](reject-extension.py) rejects uploads if they match a list of file extensions
# on message
* [wget.py](wget.py) lets you download files by POSTing URLs to copyparty

68
bin/hooks/discord-announce.py Executable file
View File

@@ -0,0 +1,68 @@
#!/usr/bin/env python3
import sys
import json
import requests
from copyparty.util import humansize, quotep
_ = r"""
announces a new upload on discord
example usage as global config:
--xau f,t5,j,bin/hooks/discord-announce.py
example usage as a volflag (per-volume config):
-v srv/inc:inc:r:rw,ed:c,xau=f,t5,j,bin/hooks/discord-announce.py
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
(share filesystem-path srv/inc as volume /inc,
readable by everyone, read-write for user 'ed',
running this plugin on all uploads with the params listed below)
parameters explained,
xbu = execute after upload
f = fork; don't wait for it to finish
t5 = timeout if it's still running after 5 sec
j = provide upload information as json; not just the filename
replace "xau" with "xbu" to announce Before upload starts instead of After completion
# how to discord:
first create the webhook url; https://support.discord.com/hc/en-us/articles/228383668-Intro-to-Webhooks
then use this to design your message: https://discohook.org/
"""
def main():
    """read upload-info json from argv[1] and announce the upload on discord"""
    # replace with your own webhook url; never commit a real one to version control.
    # (a previous revision hardcoded a live webhook here -- treat it as leaked and rotate it)
    WEBHOOK = "https://discord.com/api/webhooks/1234/base64"

    # read info from copyparty
    inf = json.loads(sys.argv[1])
    vpath = inf["vp"]
    filename = vpath.split("/")[-1]
    url = f"https://{inf['host']}/{quotep(vpath)}"

    # compose the message to discord
    j = {
        "title": filename,
        "url": url,
        "description": url.rsplit("/", 1)[0],
        "color": 0x449900,
        "fields": [
            {"name": "Size", "value": humansize(inf["sz"])},
            {"name": "User", "value": inf["user"]},
            {"name": "IP", "value": inf["ip"]},
        ],
    }
    for v in j["fields"]:
        v["inline"] = True

    r = requests.post(WEBHOOK, json={"embeds": [j]})
    print(f"discord: {r}\n", end="")


if __name__ == "__main__":
    main()

72
bin/hooks/image-noexif.py Executable file
View File

@@ -0,0 +1,72 @@
#!/usr/bin/env python3
import os
import sys
import subprocess as sp
_ = r"""
remove exif tags from uploaded images; the eventhook edition of
https://github.com/9001/copyparty/blob/hovudstraum/bin/mtag/image-noexif.py
dependencies:
exiftool / perl-Image-ExifTool
being an upload hook, this will take effect after upload completion
but before copyparty has hashed/indexed the file, which means that
copyparty will never index the original file, so deduplication will
not work as expected... which is mostly OK but ehhh
note: modifies the file in-place, so don't set the `f` (fork) flag
example usages; either as global config (all volumes) or as volflag:
--xau bin/hooks/image-noexif.py
-v srv/inc:inc:r:rw,ed:c,xau=bin/hooks/image-noexif.py
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
explained:
share fs-path srv/inc at /inc (readable by all, read-write for user ed)
running this xau (execute-after-upload) plugin for all uploaded files
"""
# filetypes to process; ignores everything else
EXTS = ("jpg", "jpeg", "avif", "heif", "heic")

try:
    from copyparty.util import fsenc
except:
    # standalone fallback: bytes for the subprocess argv
    def fsenc(p):
        return p.encode("utf-8")


def main():
    """strip exif/iptc/xmp from the uploaded image at sys.argv[1] using exiftool"""
    target = sys.argv[1]
    suffix = target.lower().split(".")[-1]
    if suffix not in EXTS:
        return

    where, basename = os.path.split(target)
    os.chdir(where)

    argv = [b"exiftool"]
    argv += [b"-exif:all=", b"-iptc:all=", b"-xmp:all="]
    # -P keeps the file's mtime; edit in-place instead of writing a copy
    argv += [b"-P", b"-overwrite_original", b"--", fsenc(basename)]
    sp.check_output(argv)
    print("image-noexif: stripped")


if __name__ == "__main__":
    # never block/fail the upload; errors are intentionally swallowed
    try:
        main()
    except:
        pass

66
bin/hooks/notify.py Executable file
View File

@@ -0,0 +1,66 @@
#!/usr/bin/env python3
import os
import sys
import subprocess as sp
from plyer import notification
_ = r"""
show os notification on upload; works on windows, linux, macos, android
dependencies:
windows: python3 -m pip install --user -U plyer
linux: python3 -m pip install --user -U plyer
macos: python3 -m pip install --user -U plyer pyobjus
android: just termux and termux-api
example usages; either as global config (all volumes) or as volflag:
--xau f,bin/hooks/notify.py
-v srv/inc:inc:r:rw,ed:c,xau=f,bin/hooks/notify.py
^^^^^^^^^^^^^^^^^^^^^^^^^^^
(share filesystem-path srv/inc as volume /inc,
readable by everyone, read-write for user 'ed',
running this plugin on all uploads with the params listed below)
parameters explained,
xau = execute after upload
f = fork so it doesn't block uploads
"""
try:
    from copyparty.util import humansize
except:
    # stand-in when running outside the copyparty tree:
    # shows the raw byte count instead of a pretty size
    def humansize(n):
        return n


def main():
    """show a desktop (or termux) notification for the uploaded file in sys.argv[1]"""
    path = sys.argv[1]
    folder, fname = os.path.split(path)

    try:
        size = humansize(os.path.getsize(path))
    except:
        size = "?"

    body = "{} ({})\n📁 {}".format(fname, size, folder)
    heading = "File received"

    if "com.termux" in sys.executable:
        sp.run(["termux-notification", "-t", heading, "-c", body])
        return

    on_linux = sys.platform == "linux"
    notification.notify(
        title=heading,
        message=body,
        app_icon="emblem-documents-symbolic" if on_linux else "",
        timeout=10,
    )


if __name__ == "__main__":
    main()

72
bin/hooks/notify2.py Executable file
View File

@@ -0,0 +1,72 @@
#!/usr/bin/env python3
import json
import os
import sys
import subprocess as sp
from datetime import datetime
from plyer import notification
_ = r"""
same as notify.py but with additional info (uploader, ...)
and also supports --xm (notify on 📟 message)
example usages; either as global config (all volumes) or as volflag:
--xm f,j,bin/hooks/notify2.py
--xau f,j,bin/hooks/notify2.py
-v srv/inc:inc:r:rw,ed:c,xm=f,j,bin/hooks/notify2.py
-v srv/inc:inc:r:rw,ed:c,xau=f,j,bin/hooks/notify2.py
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
(share filesystem-path srv/inc as volume /inc,
readable by everyone, read-write for user 'ed',
running this plugin on all uploads / msgs with the params listed below)
parameters explained,
xau = execute after upload
f = fork so it doesn't block uploads
j = provide json instead of filepath list
"""
try:
    from copyparty.util import humansize
except:
    # stand-in when running outside the copyparty tree:
    # shows the raw byte count instead of a pretty size
    def humansize(n):
        return n


def main():
    """notify on upload or 📟 message; expects upload-info json in sys.argv[1]"""
    info = json.loads(sys.argv[1])
    full_path = info["ap"]
    pretty_sz = humansize(info["sz"])
    folder, fname = os.path.split(full_path)
    stamp = datetime.utcfromtimestamp(info["mt"]).strftime("%Y-%m-%d %H:%M:%S")

    on_linux = sys.platform == "linux"
    if info.get("txt"):
        # 📟 message rather than a file upload
        body = info["txt"]
        heading = "Message received"
        icon = "mail-unread-symbolic" if on_linux else ""
    else:
        body = f"{fname} ({pretty_sz})\n📁 {folder}"
        heading = "File received"
        icon = "emblem-documents-symbolic" if on_linux else ""

    body += f"\n👤 {info['user']} ({info['ip']})\n🕒 {stamp}"

    if "com.termux" in sys.executable:
        sp.run(["termux-notification", "-t", heading, "-c", body])
        return

    notification.notify(
        title=heading,
        message=body,
        app_icon=icon,
        timeout=10,
    )


if __name__ == "__main__":
    main()

35
bin/hooks/reject-extension.py Executable file
View File

@@ -0,0 +1,35 @@
#!/usr/bin/env python3
import sys
_ = r"""
reject file uploads by file extension
example usage as global config:
--xbu c,bin/hooks/reject-extension.py
example usage as a volflag (per-volume config):
-v srv/inc:inc:r:rw,ed:c,xbu=c,bin/hooks/reject-extension.py
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
(share filesystem-path srv/inc as volume /inc,
readable by everyone, read-write for user 'ed',
running this plugin on all uploads with the params listed below)
parameters explained,
xbu = execute before upload
c = check result, reject upload if error
"""
def main():
    """exit 1 (reject the upload) if the file extension is denylisted, else exit 0"""
    bad = "exe scr com pif bat ps1 jar msi"
    # lowercase before comparing so "EVIL.EXE" is caught too;
    # clients and filesystems (notably windows) are case-insensitive
    ext = sys.argv[1].lower().split(".")[-1]
    sys.exit(1 if ext in bad.split() else 0)


if __name__ == "__main__":
    main()

44
bin/hooks/reject-mimetype.py Executable file
View File

@@ -0,0 +1,44 @@
#!/usr/bin/env python3
import sys
import magic
_ = r"""
reject file uploads by mimetype
dependencies (linux, macos):
python3 -m pip install --user -U python-magic
dependencies (windows):
python3 -m pip install --user -U python-magic-bin
example usage as global config:
--xau c,bin/hooks/reject-mimetype.py
example usage as a volflag (per-volume config):
-v srv/inc:inc:r:rw,ed:c,xau=c,bin/hooks/reject-mimetype.py
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
(share filesystem-path srv/inc as volume /inc,
readable by everyone, read-write for user 'ed',
running this plugin on all uploads with the params listed below)
parameters explained,
xau = execute after upload
c = check result, reject upload if error
"""
def main():
    """exit 0 (accept the upload) only if libmagic says it's jpeg/png, else exit 1"""
    allowed = ["image/jpeg", "image/png"]
    detected = magic.from_file(sys.argv[1], mime=True)
    print(detected)
    if detected in allowed:
        sys.exit(0)
    sys.exit(1)


if __name__ == "__main__":
    main()

60
bin/hooks/wget.py Executable file
View File

@@ -0,0 +1,60 @@
#!/usr/bin/env python3
import os
import sys
import json
import subprocess as sp
_ = r"""
use copyparty as a file downloader by POSTing URLs as
application/x-www-form-urlencoded (for example using the
message/pager function on the website)
example usage as global config:
--xm f,j,t3600,bin/hooks/wget.py
example usage as a volflag (per-volume config):
-v srv/inc:inc:r:rw,ed:c,xm=f,j,t3600,bin/hooks/wget.py
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
(share filesystem-path srv/inc as volume /inc,
readable by everyone, read-write for user 'ed',
running this plugin on all messages with the params listed below)
parameters explained,
xm = execute on message-to-server-log
f = fork so it doesn't block uploads
j = provide message information as json; not just the text
c3 = mute all output
t3600 = timeout and kill download after 1 hour
"""
def main():
    """download the URL from a 📟 message (json on argv[1]) into the posting folder"""
    inf = json.loads(sys.argv[1])
    url = inf["txt"]
    if "://" not in url:
        url = "https://" + url

    # download into the folder the message was posted to
    os.chdir(inf["ap"])

    name = url.split("?")[0].split("/")[-1]
    tfn = "-- DOWNLOADING " + name
    print(f"{tfn}\n", end="")
    # zero-byte marker file so the folder listing shows the download in progress
    open(tfn, "wb").close()

    cmd = ["wget", "--trust-server-names", "-nv", "--", url]
    try:
        sp.check_call(cmd)
    except:
        # fix: marker filename previously said "DONWLOAD"
        t = "-- FAILED TO DOWNLOAD " + name
        print(f"{t}\n", end="")
        open(t, "wb").close()

    # remove the in-progress marker whether or not the download succeeded
    os.unlink(tfn)


if __name__ == "__main__":
    main()

108
bin/hooks/xiu-sha.py Executable file
View File

@@ -0,0 +1,108 @@
#!/usr/bin/env python3
import hashlib
import json
import sys
from datetime import datetime
_ = r"""
this hook will produce a single sha512 file which
covers all recent uploads (plus metadata comments)
use this with --xiu, which makes copyparty buffer
uploads until server is idle, providing file infos
on stdin (filepaths or json)
example usage as global config:
--xiu i5,j,bin/hooks/xiu-sha.py
example usage as a volflag (per-volume config):
-v srv/inc:inc:r:rw,ed:c,xiu=i5,j,bin/hooks/xiu-sha.py
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
(share filesystem-path srv/inc as volume /inc,
readable by everyone, read-write for user 'ed',
running this plugin on batches of uploads with the params listed below)
parameters explained,
xiu = execute after uploads...
i5 = ...after volume has been idle for 5sec
j = provide json instead of filepath list
note the "f" (fork) flag is not set, so this xiu
will block other xiu hooks while it's running
"""
try:
    from copyparty.util import fsenc
except:
    # standalone fallback when copyparty is not importable;
    # NOTE(review): identity passthrough (str, not bytes) -- the equivalent fallback
    # in other hooks encodes to utf-8; open() accepts str so presumably fine, confirm
    def fsenc(p):
        return p
def humantime(ts):
    """unix timestamp -> "YYYY-MM-DD hh:mm:ss" in UTC"""
    when = datetime.utcfromtimestamp(ts)
    return when.strftime("%Y-%m-%d %H:%M:%S")
def find_files_root(inf):
    """length of the directory prefix shared by all uploads' filepaths, +1 for the slash"""
    shortest = 9000
    for left, right in zip(inf, inf[1:]):
        d1 = left["ap"].replace("\\", "/").rsplit("/", 1)[0]
        d2 = right["ap"].replace("\\", "/").rsplit("/", 1)[0]
        shortest = min(len(d1), len(d2), shortest)
        # shrink to the first column where the two dirs disagree
        idx = 0
        while idx < shortest and d1[idx] == d2[idx]:
            idx += 1
        shortest = idx
    return shortest + 1
def find_vol_root(inf):
    """length of the filesystem prefix in front of the first upload's volume-path (vp)"""
    rec = inf[0]
    mountpoint = rec["ap"][: -len(rec["vp"])]
    return len(mountpoint)
def main():
    """read the batch of upload-infos (json array) from stdin and write one
    .sha512 checksum file, covering all of them, to the volume root"""
    zb = sys.stdin.buffer.read()
    zs = zb.decode("utf-8", "replace")
    inf = json.loads(zs)

    # root directory (where to put the sha512 file);
    # di = find_files_root(inf) # next to the file closest to volume root
    di = find_vol_root(inf)  # top of the entire volume

    ret = []  # output lines of the checksum file
    total_sz = 0
    for md in inf:
        ap = md["ap"]  # absolute filesystem path of the upload
        rp = ap[di:]  # path relative to where the checksum file goes
        total_sz += md["sz"]
        fsize = "{:,}".format(md["sz"])
        mtime = humantime(md["mt"])
        up_ts = humantime(md["at"])

        # hash the file in 512 KiB chunks to keep memory usage flat
        h = hashlib.sha512()
        with open(fsenc(md["ap"]), "rb", 512 * 1024) as f:
            while True:
                buf = f.read(512 * 1024)
                if not buf:
                    break
                h.update(buf)

        cksum = h.hexdigest()
        # one metadata comment line, then a standard "<hash> *<path>" line
        meta = " | ".join([md["wark"], up_ts, mtime, fsize, md["ip"]])
        ret.append("# {}\n{} *{}".format(meta, cksum, rp))

    ret.append("# {} files, {} bytes total".format(len(inf), total_sz))
    ret.append("")
    # timestamped output name, e.g. "xfer-2023-0423-211335.123456.sha512"
    ftime = datetime.utcnow().strftime("%Y-%m%d-%H%M%S.%f")
    fp = "{}xfer-{}.sha512".format(inf[0]["ap"][:di], ftime)
    with open(fsenc(fp), "wb") as f:
        f.write("\n".join(ret).encode("utf-8", "replace"))

    print("wrote checksums to {}".format(fp))


if __name__ == "__main__":
    main()

50
bin/hooks/xiu.py Executable file
View File

@@ -0,0 +1,50 @@
#!/usr/bin/env python3
import json
import sys
_ = r"""
this hook prints absolute filepaths + total size
use this with --xiu, which makes copyparty buffer
uploads until server is idle, providing file infos
on stdin (filepaths or json)
example usage as global config:
--xiu i1,j,bin/hooks/xiu.py
example usage as a volflag (per-volume config):
-v srv/inc:inc:r:rw,ed:c,xiu=i1,j,bin/hooks/xiu.py
^^^^^^^^^^^^^^^^^^^^^^^^^^^
(share filesystem-path srv/inc as volume /inc,
readable by everyone, read-write for user 'ed',
running this plugin on batches of uploads with the params listed below)
parameters explained,
xiu = execute after uploads...
i1 = ...after volume has been idle for 1sec
j = provide json instead of filepath list
note the "f" (fork) flag is not set, so this xiu
will block other xiu hooks while it's running
"""
def main():
    """print each upload's size + absolute path, then a summary line;
    the batch of upload-infos arrives as a json array on stdin"""
    raw = sys.stdin.buffer.read()
    uploads = json.loads(raw.decode("utf-8", "replace"))

    grand_total = 0
    for item in uploads:
        size = item["sz"]
        grand_total += size
        print("{:9} {}".format(size, item["ap"]))

    print("{} files, {} bytes total".format(len(uploads), grand_total))


if __name__ == "__main__":
    main()

View File

@@ -1,5 +1,9 @@
standalone programs which take an audio file as argument
you may want to forget about all this fancy complicated stuff and just use [event hooks](../hooks/) instead (which doesn't need `-e2ts` or ffmpeg)
----
**NOTE:** these all require `-e2ts` to be functional, meaning you need to do at least one of these: `apt install ffmpeg` or `pip3 install mutagen`
some of these rely on libraries which are not MIT-compatible
@@ -17,6 +21,7 @@ these do not have any problematic dependencies at all:
* [cksum.py](./cksum.py) computes various checksums
* [exe.py](./exe.py) grabs metadata from .exe and .dll files (example for retrieving multiple tags with one parser)
* [wget.py](./wget.py) lets you download files by POSTing URLs to copyparty
* also available as an [event hook](../hooks/wget.py)
# dependencies
@@ -26,7 +31,7 @@ run [`install-deps.sh`](install-deps.sh) to build/install most dependencies requ
*alternatively* (or preferably) use packages from your distro instead, then you'll need at least these:
* from distro: `numpy vamp-plugin-sdk beatroot-vamp mixxx-keyfinder ffmpeg`
* from pypy: `keyfinder vamp`
* from pip: `keyfinder vamp`
# usage from copyparty
@@ -42,7 +47,7 @@ run [`install-deps.sh`](install-deps.sh) to build/install most dependencies requ
* `mtp` modules will not run if a file has existing tags in the db, so clear out the tags with `-e2tsr` the first time you launch with new `mtp` options
## usage with volume-flags
## usage with volflags
instead of affecting all volumes, you can set the options for just one volume like so:

View File

@@ -16,6 +16,10 @@ dep: ffmpeg
"""
# save beat timestamps to ".beats/filename.txt"
SAVE = False
def det(tf):
# fmt: off
sp.check_call([
@@ -23,12 +27,11 @@ def det(tf):
b"-nostdin",
b"-hide_banner",
b"-v", b"fatal",
b"-ss", b"13",
b"-y", b"-i", fsenc(sys.argv[1]),
b"-map", b"0:a:0",
b"-ac", b"1",
b"-ar", b"22050",
b"-t", b"300",
b"-t", b"360",
b"-f", b"f32le",
fsenc(tf)
])
@@ -47,10 +50,29 @@ def det(tf):
print(c["list"][0]["label"].split(" ")[0])
return
# throws if detection failed:
bpm = float(cl[-1]["timestamp"] - cl[1]["timestamp"])
bpm = round(60 * ((len(cl) - 1) / bpm), 2)
print(f"{bpm:.2f}")
# throws if detection failed:
beats = [float(x["timestamp"]) for x in cl]
bds = [b - a for a, b in zip(beats, beats[1:])]
bds.sort()
n0 = int(len(bds) * 0.2)
n1 = int(len(bds) * 0.75) + 1
bds = bds[n0:n1]
bpm = sum(bds)
bpm = round(60 * (len(bds) / bpm), 2)
print(f"{bpm:.2f}")
if SAVE:
fdir, fname = os.path.split(sys.argv[1])
bdir = os.path.join(fdir, ".beats")
try:
os.mkdir(fsenc(bdir))
except:
pass
fp = os.path.join(bdir, fname) + ".txt"
with open(fsenc(fp), "wb") as f:
txt = "\n".join([f"{x:.2f}" for x in beats])
f.write(txt.encode("utf-8"))
def main():

View File

@@ -17,7 +17,7 @@ except:
"""
calculates various checksums for uploads,
usage: -mtp crc32,md5,sha1,sha256b=bin/mtag/cksum.py
usage: -mtp crc32,md5,sha1,sha256b=ad,bin/mtag/cksum.py
"""

61
bin/mtag/guestbook-read.py Executable file
View File

@@ -0,0 +1,61 @@
#!/usr/bin/env python3
"""
fetch latest msg from guestbook and return as tag
example copyparty config to use this:
--urlform save,get -vsrv/hello:hello:w:c,e2ts,mtp=guestbook=t10,ad,p,bin/mtag/guestbook-read.py:mte=+guestbook
explained:
for realpath srv/hello (served at /hello), write-only for everyone,
enable file analysis on upload (e2ts),
use mtp plugin "bin/mtag/guestbook-read.py" to provide metadata tag "guestbook",
do this on all uploads regardless of extension,
t10 = 10 seconds timeout for each download,
ad = parse file regardless if FFmpeg thinks it is audio or not
p = request upload info as json on stdin (need ip)
mte=+guestbook enabled indexing of that tag for this volume
PS: this requires e2ts to be functional,
meaning you need to do at least one of these:
* apt install ffmpeg
* pip3 install mutagen
"""
import json
import os
import sqlite3
import sys
# set 0 to allow infinite msgs from one IP,
# other values delete older messages to make space,
# so 1 only keeps latest msg
NUM_MSGS_TO_KEEP = 1
def main():
    """print the most recent guestbook message stored for the uploader's IP"""
    fp = os.path.abspath(sys.argv[1])
    fdir = os.path.dirname(fp)  # kept for the optional os.chdir below

    # upload-info json arrives on stdin (the "p" mtp flag)
    zb = sys.stdin.buffer.read()
    zs = zb.decode("utf-8", "replace")
    md = json.loads(zs)

    ip = md["up_ip"]

    # can put the database inside `fdir` if you'd like,
    # by default it saves to PWD:
    # os.chdir(fdir)
    db = sqlite3.connect("guestbook.db3")

    with db:
        # newest first, so fetchone returns the latest message
        t = "select msg from gb where ip = ? order by ts desc"
        r = db.execute(t, (ip,)).fetchone()
        if r:
            print(r[0])


if __name__ == "__main__":
    main()

111
bin/mtag/guestbook.py Normal file
View File

@@ -0,0 +1,111 @@
#!/usr/bin/env python3
"""
store messages from users in an sqlite database
which can be read from another mtp for example
takes input from application/x-www-form-urlencoded POSTs,
for example using the message/pager function on the website
example copyparty config to use this:
--urlform save,get -vsrv/hello:hello:w:c,e2ts,mtp=xgb=ebin,t10,ad,p,bin/mtag/guestbook.py:mte=+xgb
explained:
for realpath srv/hello (served at /hello), write-only for everyone,
enable file analysis on upload (e2ts),
use mtp plugin "bin/mtag/guestbook.py" to provide metadata tag "xgb",
do this on all uploads with the file extension "bin",
t10 = 10 seconds timeout for each download,
ad = parse file regardless if FFmpeg thinks it is audio or not
p = request upload info as json on stdin
mte=+xgb enabled indexing of that tag for this volume
PS: this requires e2ts to be functional,
meaning you need to do at least one of these:
* apt install ffmpeg
* pip3 install mutagen
"""
import json
import os
import sqlite3
import sys
from urllib.parse import unquote_to_bytes as unquote
# set 0 to allow infinite msgs from one IP,
# other values delete older messages to make space,
# so 1 only keeps latest msg
NUM_MSGS_TO_KEEP = 1
def main():
    """store an urlencoded "msg=..." POST body in guestbook.db3 keyed by uploader IP,
    then print the message text (which copyparty indexes as the configured tag)"""
    fp = os.path.abspath(sys.argv[1])
    fdir = os.path.dirname(fp)  # kept for the optional os.chdir below
    fname = os.path.basename(fp)
    # copyparty saves urlform POSTs as "put-*.bin"; refuse anything else
    if not fname.startswith("put-") or not fname.endswith(".bin"):
        raise Exception("not a post file")

    # upload-info json arrives on stdin (the "p" mtp flag)
    zb = sys.stdin.buffer.read()
    zs = zb.decode("utf-8", "replace")
    md = json.loads(zs)

    # read the POST body, bailing out once it exceeds 4 KiB
    buf = b""
    with open(fp, "rb") as f:
        while True:
            b = f.read(4096)
            buf += b
            if len(buf) > 4096:
                raise Exception("too big")
            if not b:
                break

    if not buf:
        raise Exception("file is empty")

    # undo application/x-www-form-urlencoded ("+" is space, %xx escapes)
    buf = unquote(buf.replace(b"+", b" "))
    txt = buf.decode("utf-8")
    if not txt.startswith("msg="):
        raise Exception("does not start with msg=")

    ip = md["up_ip"]
    ts = md["up_at"]
    txt = txt[4:]  # strip the "msg=" prefix

    # can put the database inside `fdir` if you'd like,
    # by default it saves to PWD:
    # os.chdir(fdir)
    db = sqlite3.connect("guestbook.db3")
    try:
        # probe for the table; create the schema on first run
        db.execute("select 1 from gb").fetchone()
    except:
        with db:
            db.execute("create table gb (ip text, ts real, msg text)")
            db.execute("create index gb_ip on gb(ip)")

    with db:
        if NUM_MSGS_TO_KEEP == 1:
            # fast path: one message per IP, just replace it
            t = "delete from gb where ip = ?"
            db.execute(t, (ip,))

        t = "insert into gb values (?,?,?)"
        db.execute(t, (ip, ts, txt))

        if NUM_MSGS_TO_KEEP > 1:
            # trim messages older than the retention limit for this IP
            t = "select ts from gb where ip = ? order by ts desc"
            hits = db.execute(t, (ip,)).fetchall()
            if len(hits) > NUM_MSGS_TO_KEEP:
                lim = hits[NUM_MSGS_TO_KEEP][0]
                t = "delete from gb where ip = ? and ts <= ?"
                db.execute(t, (ip, lim))

    print(txt)


if __name__ == "__main__":
    main()

View File

@@ -43,7 +43,6 @@ PS: this requires e2ts to be functional,
import os
import sys
import time
import filecmp
import subprocess as sp
@@ -62,7 +61,7 @@ def main():
os.chdir(cwd)
f1 = fsenc(fn)
f2 = os.path.join(b"noexif", f1)
f2 = fsenc(os.path.join(b"noexif", fn))
cmd = [
b"exiftool",
b"-exif:all=",
@@ -90,4 +89,7 @@ def main():
if __name__ == "__main__":
main()
try:
main()
except:
pass

View File

@@ -6,6 +6,7 @@ set -e
#
# linux/alpine: requires gcc g++ make cmake patchelf {python3,ffmpeg,fftw,libsndfile}-dev py3-{wheel,pip} py3-numpy{,-dev}
# linux/debian: requires libav{codec,device,filter,format,resample,util}-dev {libfftw3,python3,libsndfile1}-dev python3-{numpy,pip} vamp-{plugin-sdk,examples} patchelf cmake
# linux/fedora: requires gcc gcc-c++ make cmake patchelf {python3,ffmpeg,fftw,libsndfile}-devel python3-numpy vamp-plugin-sdk qm-vamp-plugins
# win64: requires msys2-mingw64 environment
# macos: requires macports
#
@@ -56,6 +57,7 @@ hash -r
command -v python3 && pybin=python3 || pybin=python
}
$pybin -c 'import numpy' ||
$pybin -m pip install --user numpy
@@ -160,12 +162,12 @@ install_keyfinder() {
h="$HOME"
so="lib/libkeyfinder.so"
memes=()
memes=(-DBUILD_TESTING=OFF)
[ $win ] &&
so="bin/libkeyfinder.dll" &&
h="$(printf '%s\n' "$USERPROFILE" | tr '\\' '/')" &&
memes+=(-G "MinGW Makefiles" -DBUILD_TESTING=OFF)
memes+=(-G "MinGW Makefiles")
[ $mac ] &&
so="lib/libkeyfinder.dylib"
@@ -185,7 +187,7 @@ install_keyfinder() {
}
# rm -rf /Users/ed/Library/Python/3.9/lib/python/site-packages/*keyfinder*
CFLAGS="-I$h/pe/keyfinder/include -I/opt/local/include" \
CFLAGS="-I$h/pe/keyfinder/include -I/opt/local/include -I/usr/include/ffmpeg" \
LDFLAGS="-L$h/pe/keyfinder/lib -L$h/pe/keyfinder/lib64 -L/opt/local/lib" \
PKG_CONFIG_PATH=/c/msys64/mingw64/lib/pkgconfig \
$pybin -m pip install --user keyfinder
@@ -223,7 +225,7 @@ install_vamp() {
$pybin -m pip install --user vamp
cd "$td"
echo '#include <vamp-sdk/Plugin.h>' | gcc -x c -c -o /dev/null - || [ -e ~/pe/vamp-sdk ] || {
echo '#include <vamp-sdk/Plugin.h>' | g++ -x c++ -c -o /dev/null - || [ -e ~/pe/vamp-sdk ] || {
printf '\033[33mcould not find the vamp-sdk, building from source\033[0m\n'
(dl_files yolo https://code.soundsoftware.ac.uk/attachments/download/2588/vamp-plugin-sdk-2.9.0.tar.gz)
sha512sum -c <(

38
bin/mtag/mousepad.py Normal file
View File

@@ -0,0 +1,38 @@
#!/usr/bin/env python3
import os
import sys
import subprocess as sp
"""
mtp test -- opens a texteditor
usage:
-vsrv/v1:v1:r:c,mte=+x1:c,mtp=x1=ad,p,bin/mtag/mousepad.py
explained:
c,mte: list of tags to index in this volume
c,mtp: add new tag provider
x1: dummy tag to provide
ad: dontcare if audio or not
p: priority 1 (run after initial tag-scan with ffprobe or mutagen)
"""
def main():
    """Open the uploaded file (or the tag-json on stdin) in mousepad.

    Runs as a copyparty mtp tag-provider; whatever this prints to
    stdout becomes the value of the dummy tag "x1".
    """
    env = os.environ.copy()
    env["DISPLAY"] = ":0.0"  # mtp runs headless; point at the local X session

    if False:
        # open the uploaded file
        fp = sys.argv[-1]
    else:
        # display stdin contents (`oth_tags`)
        fp = "/dev/stdin"

    # bugfix: env was built and modified but never handed to Popen,
    # so the DISPLAY override had no effect on the spawned editor
    p = sp.Popen(["/usr/bin/mousepad", fp], env=env)
    p.communicate()


main()

76
bin/mtag/rclone-upload.py Normal file
View File

@@ -0,0 +1,76 @@
#!/usr/bin/env python
import json
import os
import subprocess as sp
import sys
import time
try:
from copyparty.util import fsenc
except:
def fsenc(p):
return p.encode("utf-8")
_ = r"""
first checks the tag "vidchk" which must be "ok" to continue,
then uploads all files to some cloud storage (RCLONE_REMOTE)
and DELETES THE ORIGINAL FILES if rclone returns 0 ("success")
deps:
rclone
usage:
-mtp x2=t43200,ay,p2,bin/mtag/rclone-upload.py
explained:
t43200: timeout 12h
ay: only process files which contain audio (including video with audio)
p2: set priority 2 (after vidchk's suggested priority of 1),
so the output of vidchk will be passed in here
complete usage example as vflags along with vidchk:
-vsrv/vidchk:vidchk:r:rw,ed:c,e2dsa,e2ts,mtp=vidchk=t600,p,bin/mtag/vidchk.py:c,mtp=rupload=t43200,ay,p2,bin/mtag/rclone-upload.py:c,mte=+vidchk,rupload
setup: see https://rclone.org/drive/
if you wanna use this script standalone / separately from copyparty,
either set CONDITIONAL_UPLOAD False or provide the following stdin:
{"vidchk":"ok"}
"""
RCLONE_REMOTE = "notmybox"
CONDITIONAL_UPLOAD = True
def main():
    """Upload sys.argv[1] to RCLONE_REMOTE, deleting the local copy on success.

    When CONDITIONAL_UPLOAD is set, stdin must carry json containing
    {"vidchk": "ok"} (as produced by vidchk.py), otherwise we bail out.
    Exits nonzero when the check or the upload fails.
    """
    path = sys.argv[1]

    if CONDITIONAL_UPLOAD:
        # the previous mtp (vidchk) feeds its verdict as json on stdin
        tags = json.loads(sys.stdin.buffer.read().decode("utf-8", "replace"))
        verdict = tags.get("vidchk", None)
        if verdict != "ok":
            print(f"vidchk={verdict}", file=sys.stderr)
            sys.exit(1)

    remote = f"{RCLONE_REMOTE}:".encode("utf-8")
    started = time.time()
    try:
        sp.check_call([b"rclone", b"copy", b"--", fsenc(path), remote])
    except:
        print("rclone failed", file=sys.stderr)
        sys.exit(1)

    print(f"{time.time() - started:.1f} sec")
    os.unlink(fsenc(path))  # upload succeeded; drop the local copy


if __name__ == "__main__":
    main()

View File

@@ -16,7 +16,7 @@ goes without saying, but this is HELLA DANGEROUS,
GIVES RCE TO ANYONE WHO HAVE UPLOAD PERMISSIONS
example copyparty config to use this:
--urlform save,get -v.::w:c,e2d,e2t,mte=+a1:c,mtp=a1=ad,bin/mtag/very-bad-idea.py
--urlform save,get -v.::w:c,e2d,e2t,mte=+a1:c,mtp=a1=ad,kn,c0,bin/mtag/very-bad-idea.py
recommended deps:
apt install xdotool libnotify-bin
@@ -63,8 +63,8 @@ set -e
EOF
chmod 755 /usr/local/bin/chromium-browser
# start the server (note: replace `-v.::rw:` with `-v.::r:` to disallow retrieving uploaded stuff)
cd ~/Downloads; python3 copyparty-sfx.py --urlform save,get -v.::rw:c,e2d,e2t,mte=+a1:c,mtp=a1=ad,very-bad-idea.py
# start the server (note: replace `-v.::rw:` with `-v.::w:` to disallow retrieving uploaded stuff)
cd ~/Downloads; python3 copyparty-sfx.py --urlform save,get -v.::rw:c,e2d,e2t,mte=+a1:c,mtp=a1=ad,kn,very-bad-idea.py
"""

131
bin/mtag/vidchk.py Executable file
View File

@@ -0,0 +1,131 @@
#!/usr/bin/env python3
import json
import re
import os
import sys
import subprocess as sp
try:
from copyparty.util import fsenc
except:
def fsenc(p):
return p.encode("utf-8")
_ = r"""
inspects video files for errors and such
plus stores a bunch of metadata to filename.ff.json
usage:
-mtp vidchk=t600,ay,p,bin/mtag/vidchk.py
explained:
t600: timeout 10min
ay: only process files which contain audio (including video with audio)
p: set priority 1 (lowest priority after initial ffprobe/mutagen for base tags),
makes copyparty feed base tags into this script as json
if you wanna use this script standalone / separately from copyparty,
provide the video resolution on stdin as json: {"res":"1920x1080"}
"""
FAST = True # parse entire file at container level
# FAST = False # fully decode audio and video streams
# warnings to ignore
harmless = re.compile(
r"Unsupported codec with id |Could not find codec parameters.*Attachment:|analyzeduration"
+ r"|timescale not set"
)
def wfilter(lines):
    """Drop blank lines and known-harmless ffmpeg warnings."""
    keep = []
    for ln in lines:
        if ln.strip() and not harmless.search(ln):
            keep.append(ln)
    return keep
def errchk(so, se, rc, dbg):
    """Summarize an ffmpeg/ffprobe run; None if it looks clean.

    so/se: captured stdout/stderr (bytes), rc: process exit code,
    dbg: optional path to dump the raw output for inspection.
    """
    if dbg:
        with open(dbg, "wb") as f:
            f.write(b"so:\n" + so + b"\nse:\n" + se + b"\n")

    if rc:
        # nonzero exit: report the first non-harmless line (or the raw first line)
        lines = (so + se).decode("utf-8", "replace").split("\n", 1)
        lines = wfilter(lines) or lines
        return f"ERROR {rc}: {lines[0]}"

    if se:
        # clean exit but stderr output: only warn if it survives the filter
        lines = wfilter(se.decode("utf-8", "replace").split("\n", 1))
        if lines:
            return f"Warning: {lines[0]}"

    return None
def main():
    """Validate the uploaded video given as sys.argv[1].

    Returns None on success or a short error string, which copyparty
    stores as the "vidchk" tag. Side effect: writes ffprobe metadata
    to <file>.ff.json (and raw tool output to <file>.vidchk) next to
    the upload.
    """
    fp = sys.argv[1]
    zb = sys.stdin.buffer.read()
    zs = zb.decode("utf-8", "replace")
    # base tags from copyparty; priority "p" means they arrive as json on stdin
    md = json.loads(zs)

    fdir = os.path.dirname(os.path.realpath(fp))
    flag = os.path.join(fdir, ".processed")
    if os.path.exists(flag):
        # NOTE(review): nothing in this script creates the marker file;
        # presumably some external job drops it after handling the folder
        return "already processed"

    try:
        w, h = [int(x) for x in md["res"].split("x")]
        if not w + h:
            raise Exception()
    except:
        return "could not determine resolution"

    # grab streams/format metadata + 2 seconds of frames at the start and end
    zs = "ffprobe -hide_banner -v warning -of json -show_streams -show_format -show_packets -show_data_hash crc32 -read_intervals %+2,999999%+2"
    cmd = zs.encode("ascii").split(b" ") + [fsenc(fp)]
    p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
    so, se = p.communicate()

    # spaces to tabs, drops filesize from 69k to 48k
    so = b"\n".join(
        [
            b"\t" * int((len(x) - len(x.lstrip())) / 4) + x.lstrip()
            for x in (so or b"").split(b"\n")
        ]
    )
    with open(fsenc(f"{fp}.ff.json"), "wb") as f:
        f.write(so)

    err = errchk(so, se, p.returncode, f"{fp}.vidchk")
    if err:
        return err

    # reject clips smaller than (roughly) 720p in both dimensions
    if max(w, h) < 1280 and min(w, h) < 720:
        return "resolution too small"

    # second pass: make ffmpeg abort ("explode") on any stream error
    zs = (
        "ffmpeg -y -hide_banner -nostdin -v warning"
        + " -err_detect +crccheck+bitstream+buffer+careful+compliant+aggressive+explode"
        + " -xerror -i"
    )
    cmd = zs.encode("ascii").split(b" ") + [fsenc(fp)]

    if FAST:
        zs = "-c copy -f null -"  # container-level check only (no decoding)
    else:
        zs = "-vcodec rawvideo -acodec pcm_s16le -f null -"  # full decode
    cmd += zs.encode("ascii").split(b" ")

    p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
    so, se = p.communicate()
    return errchk(so, se, p.returncode, f"{fp}.vidchk")


if __name__ == "__main__":
    print(main() or "ok")

View File

@@ -1,6 +1,11 @@
#!/usr/bin/env python3
"""
DEPRECATED -- replaced by event hooks;
https://github.com/9001/copyparty/blob/hovudstraum/bin/hooks/wget.py
---
use copyparty as a file downloader by POSTing URLs as
application/x-www-form-urlencoded (for example using the
message/pager function on the website)

View File

@@ -1,7 +1,7 @@
#!/usr/bin/env python3
from __future__ import print_function, unicode_literals
"""copyparty-fuse-streaming: remote copyparty as a local filesystem"""
"""partyfuse-streaming: remote copyparty as a local filesystem"""
__author__ = "ed <copyparty@ocv.me>"
__copyright__ = 2020
__license__ = "MIT"
@@ -12,7 +12,7 @@ __url__ = "https://github.com/9001/copyparty/"
mount a copyparty server (local or remote) as a filesystem
usage:
python copyparty-fuse-streaming.py http://192.168.1.69:3923/ ./music
python partyfuse-streaming.py http://192.168.1.69:3923/ ./music
dependencies:
python3 -m pip install --user fusepy
@@ -21,7 +21,7 @@ dependencies:
+ on Windows: https://github.com/billziss-gh/winfsp/releases/latest
this was a mistake:
fork of copyparty-fuse.py with a streaming cache rather than readahead,
fork of partyfuse.py with a streaming cache rather than readahead,
thought this was gonna be way faster (and it kind of is)
except the overhead of reopening connections on trunc totally kills it
"""
@@ -42,6 +42,7 @@ import threading
import traceback
import http.client # py2: httplib
import urllib.parse
import calendar
from datetime import datetime
from urllib.parse import quote_from_bytes as quote
from urllib.parse import unquote_to_bytes as unquote
@@ -61,12 +62,12 @@ except:
else:
libfuse = "apt install libfuse\n modprobe fuse"
print(
"\n could not import fuse; these may help:"
+ "\n python3 -m pip install --user fusepy\n "
+ libfuse
+ "\n"
)
m = """\033[33m
could not import fuse; these may help:
{} -m pip install --user fusepy
{}
\033[0m"""
print(m.format(sys.executable, libfuse))
raise
@@ -153,7 +154,7 @@ def dewin(txt):
class RecentLog(object):
def __init__(self):
self.mtx = threading.Lock()
self.f = None # open("copyparty-fuse.log", "wb")
self.f = None # open("partyfuse.log", "wb")
self.q = []
thr = threading.Thread(target=self.printer)
@@ -184,9 +185,9 @@ class RecentLog(object):
print("".join(q), end="")
# [windows/cmd/cpy3] python dev\copyparty\bin\copyparty-fuse.py q: http://192.168.1.159:1234/
# [windows/cmd/msys2] C:\msys64\mingw64\bin\python3 dev\copyparty\bin\copyparty-fuse.py q: http://192.168.1.159:1234/
# [windows/mty/msys2] /mingw64/bin/python3 /c/Users/ed/dev/copyparty/bin/copyparty-fuse.py q: http://192.168.1.159:1234/
# [windows/cmd/cpy3] python dev\copyparty\bin\partyfuse.py q: http://192.168.1.159:1234/
# [windows/cmd/msys2] C:\msys64\mingw64\bin\python3 dev\copyparty\bin\partyfuse.py q: http://192.168.1.159:1234/
# [windows/mty/msys2] /mingw64/bin/python3 /c/Users/ed/dev/copyparty/bin/partyfuse.py q: http://192.168.1.159:1234/
#
# [windows] find /q/music/albums/Phant*24bit -printf '%s %p\n' | sort -n | tail -n 8 | sed -r 's/^[0-9]+ //' | while IFS= read -r x; do dd if="$x" of=/dev/null bs=4k count=8192 & done
# [alpine] ll t; for x in t/2020_0724_16{2,3}*; do dd if="$x" of=/dev/null bs=4k count=10240 & done
@@ -495,7 +496,7 @@ class Gateway(object):
ts = 60 * 60 * 24 * 2
try:
sz = int(fsize)
ts = datetime.strptime(fdate, "%Y-%m-%d %H:%M:%S").timestamp()
ts = calendar.timegm(time.strptime(fdate, "%Y-%m-%d %H:%M:%S"))
except:
info("bad HTML or OS [{}] [{}]".format(fdate, fsize))
# python cannot strptime(1959-01-01) on windows

View File

@@ -1,7 +1,7 @@
#!/usr/bin/env python3
from __future__ import print_function, unicode_literals
"""copyparty-fuse: remote copyparty as a local filesystem"""
"""partyfuse: remote copyparty as a local filesystem"""
__author__ = "ed <copyparty@ocv.me>"
__copyright__ = 2019
__license__ = "MIT"
@@ -12,7 +12,7 @@ __url__ = "https://github.com/9001/copyparty/"
mount a copyparty server (local or remote) as a filesystem
usage:
python copyparty-fuse.py http://192.168.1.69:3923/ ./music
python partyfuse.py http://192.168.1.69:3923/ ./music
dependencies:
python3 -m pip install --user fusepy
@@ -45,6 +45,7 @@ import threading
import traceback
import http.client # py2: httplib
import urllib.parse
import calendar
from datetime import datetime
from urllib.parse import quote_from_bytes as quote
from urllib.parse import unquote_to_bytes as unquote
@@ -73,12 +74,12 @@ except:
else:
libfuse = "apt install libfuse3-3\n modprobe fuse"
print(
"\n could not import fuse; these may help:"
+ "\n python3 -m pip install --user fusepy\n "
+ libfuse
+ "\n"
)
m = """\033[33m
could not import fuse; these may help:
{} -m pip install --user fusepy
{}
\033[0m"""
print(m.format(sys.executable, libfuse))
raise
@@ -165,7 +166,7 @@ def dewin(txt):
class RecentLog(object):
def __init__(self):
self.mtx = threading.Lock()
self.f = None # open("copyparty-fuse.log", "wb")
self.f = None # open("partyfuse.log", "wb")
self.q = []
thr = threading.Thread(target=self.printer)
@@ -196,9 +197,9 @@ class RecentLog(object):
print("".join(q), end="")
# [windows/cmd/cpy3] python dev\copyparty\bin\copyparty-fuse.py q: http://192.168.1.159:1234/
# [windows/cmd/msys2] C:\msys64\mingw64\bin\python3 dev\copyparty\bin\copyparty-fuse.py q: http://192.168.1.159:1234/
# [windows/mty/msys2] /mingw64/bin/python3 /c/Users/ed/dev/copyparty/bin/copyparty-fuse.py q: http://192.168.1.159:1234/
# [windows/cmd/cpy3] python dev\copyparty\bin\partyfuse.py q: http://192.168.1.159:1234/
# [windows/cmd/msys2] C:\msys64\mingw64\bin\python3 dev\copyparty\bin\partyfuse.py q: http://192.168.1.159:1234/
# [windows/mty/msys2] /mingw64/bin/python3 /c/Users/ed/dev/copyparty/bin/partyfuse.py q: http://192.168.1.159:1234/
#
# [windows] find /q/music/albums/Phant*24bit -printf '%s %p\n' | sort -n | tail -n 8 | sed -r 's/^[0-9]+ //' | while IFS= read -r x; do dd if="$x" of=/dev/null bs=4k count=8192 & done
# [alpine] ll t; for x in t/2020_0724_16{2,3}*; do dd if="$x" of=/dev/null bs=4k count=10240 & done
@@ -443,7 +444,7 @@ class Gateway(object):
ts = 60 * 60 * 24 * 2
try:
sz = int(fsize)
ts = datetime.strptime(fdate, "%Y-%m-%d %H:%M:%S").timestamp()
ts = calendar.timegm(time.strptime(fdate, "%Y-%m-%d %H:%M:%S"))
except:
info("bad HTML or OS [{}] [{}]".format(fdate, fsize))
# python cannot strptime(1959-01-01) on windows
@@ -996,7 +997,7 @@ def main():
ap.add_argument(
"-cf", metavar="NUM_BLOCKS", type=int, default=nf, help="file cache"
)
ap.add_argument("-a", metavar="PASSWORD", help="password")
ap.add_argument("-a", metavar="PASSWORD", help="password or $filepath")
ap.add_argument("-d", action="store_true", help="enable debug")
ap.add_argument("-te", metavar="PEM_FILE", help="certificate to expect/verify")
ap.add_argument("-td", action="store_true", help="disable certificate check")

View File

@@ -1,7 +1,7 @@
#!/usr/bin/env python3
from __future__ import print_function, unicode_literals
"""copyparty-fuseb: remote copyparty as a local filesystem"""
"""partyfuse2: remote copyparty as a local filesystem"""
__author__ = "ed <copyparty@ocv.me>"
__copyright__ = 2020
__license__ = "MIT"
@@ -32,9 +32,19 @@ try:
if not hasattr(fuse, "__version__"):
raise Exception("your fuse-python is way old")
except:
print(
"\n could not import fuse; these may help:\n python3 -m pip install --user fuse-python\n apt install libfuse\n modprobe fuse\n"
)
if WINDOWS:
libfuse = "install https://github.com/billziss-gh/winfsp/releases/latest"
elif MACOS:
libfuse = "install https://osxfuse.github.io/"
else:
libfuse = "apt install libfuse\n modprobe fuse"
m = """\033[33m
could not import fuse; these may help:
{} -m pip install --user fuse-python
{}
\033[0m"""
print(m.format(sys.executable, libfuse))
raise
@@ -42,13 +52,13 @@ except:
mount a copyparty server (local or remote) as a filesystem
usage:
python ./copyparty-fuseb.py -f -o allow_other,auto_unmount,nonempty,pw=wark,url=http://192.168.1.69:3923 /mnt/nas
python ./partyfuse2.py -f -o allow_other,auto_unmount,nonempty,pw=wark,url=http://192.168.1.69:3923 /mnt/nas
dependencies:
sudo apk add fuse-dev python3-dev
python3 -m pip install --user fuse-python
fork of copyparty-fuse.py based on fuse-python which
fork of partyfuse.py based on fuse-python which
appears to be more compliant than fusepy? since this works with samba
(probably just my garbage code tbh)
"""
@@ -639,7 +649,7 @@ def main():
print(" need argument: mount-path")
print("example:")
print(
" ./copyparty-fuseb.py -f -o allow_other,auto_unmount,nonempty,pw=wark,url=http://192.168.1.69:3923 /mnt/nas"
" ./partyfuse2.py -f -o allow_other,auto_unmount,nonempty,pw=wark,url=http://192.168.1.69:3923 /mnt/nas"
)
sys.exit(1)

View File

@@ -4,20 +4,21 @@ set -e
# runs copyparty (or any other program really) in a chroot
#
# assumption: these directories, and everything within, are owned by root
sysdirs=( /bin /lib /lib32 /lib64 /sbin /usr )
sysdirs=(); for v in /bin /lib /lib32 /lib64 /sbin /usr /etc/alternatives ; do
[ -e $v ] && sysdirs+=($v)
done
# error-handler
help() { cat <<'EOF'
usage:
./prisonparty.sh <ROOTDIR> <UID> <GID> [VOLDIR [VOLDIR...]] -- python3 copyparty-sfx.py [...]"
./prisonparty.sh <ROOTDIR> <UID> <GID> [VOLDIR [VOLDIR...]] -- python3 copyparty-sfx.py [...]
example:
./prisonparty.sh /var/lib/copyparty-jail 1000 1000 /mnt/nas/music -- python3 copyparty-sfx.py -v /mnt/nas/music::rwmd"
./prisonparty.sh /var/lib/copyparty-jail 1000 1000 /mnt/nas/music -- python3 copyparty-sfx.py -v /mnt/nas/music::rwmd
example for running straight from source (instead of using an sfx):
PYTHONPATH=$PWD ./prisonparty.sh /var/lib/copyparty-jail 1000 1000 /mnt/nas/music -- python3 -um copyparty -v /mnt/nas/music::rwmd"
PYTHONPATH=$PWD ./prisonparty.sh /var/lib/copyparty-jail 1000 1000 /mnt/nas/music -- python3 -um copyparty -v /mnt/nas/music::rwmd
note that if you have python modules installed as --user (such as bpm/key detectors),
you should add /home/foo/.local as a VOLDIR
@@ -38,7 +39,7 @@ while true; do
v="$1"; shift
[ "$v" = -- ] && break # end of volumes
[ "$#" -eq 0 ] && break # invalid usage
vols+=( "$(realpath "$v")" )
vols+=( "$(realpath "$v" || echo "$v")" )
done
pybin="$1"; shift
pybin="$(command -v "$pybin")"
@@ -82,7 +83,7 @@ jail="${jail%/}"
printf '%s\n' "${sysdirs[@]}" "${vols[@]}" | sed -r 's`/$``' | LC_ALL=C sort | uniq |
while IFS= read -r v; do
[ -e "$v" ] || {
# printf '\033[1;31mfolder does not exist:\033[0m %s\n' "/$v"
printf '\033[1;31mfolder does not exist:\033[0m %s\n' "$v"
continue
}
i1=$(stat -c%D.%i "$v" 2>/dev/null || echo a)
@@ -97,9 +98,11 @@ done
cln() {
rv=$?
# cleanup if not in use
lsof "$jail" | grep -qF "$jail" &&
echo "chroot is in use, will not cleanup" ||
wait -f -p rv $p || true
cd /
echo "stopping chroot..."
lsof "$jail" | grep -F "$jail" &&
echo "chroot is in use; will not unmount" ||
{
mount | grep -F " on $jail" |
awk '{sub(/ type .*/,"");sub(/.* on /,"");print}' |
@@ -115,6 +118,15 @@ mkdir -p "$jail/tmp"
chmod 777 "$jail/tmp"
# create a dev
(cd $jail; mkdir -p dev; cd dev
[ -e null ] || mknod -m 666 null c 1 3
[ -e zero ] || mknod -m 666 zero c 1 5
[ -e random ] || mknod -m 444 random c 1 8
[ -e urandom ] || mknod -m 444 urandom c 1 9
)
# run copyparty
export HOME=$(getent passwd $uid | cut -d: -f6)
export USER=$(getent passwd $uid | cut -d: -f1)
@@ -124,5 +136,6 @@ export LOGNAME="$USER"
#echo "cpp [$cpp]"
chroot --userspec=$uid:$gid "$jail" "$pybin" $pyarg "$cpp" "$@" &
p=$!
trap 'kill -USR1 $p' USR1
trap 'kill $p' INT TERM
wait

99
bin/unforget.py Executable file
View File

@@ -0,0 +1,99 @@
#!/usr/bin/env python3
"""
unforget.py: rebuild db from logfiles
2022-09-07, v0.1, ed <irc.rizon.net>, MIT-Licensed
https://github.com/9001/copyparty/blob/hovudstraum/bin/unforget.py
only makes sense if running copyparty with --no-forget
(e.g. immediately shifting uploads to other storage)
usage:
xz -d < log | ./unforget.py .hist/up2k.db
"""
import re
import sys
import json
import base64
import sqlite3
import argparse
FS_ENCODING = sys.getfilesystemencoding()
class APF(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter):
    """argparse formatter: keep the raw module docstring AND show defaults."""
    pass
mem_cur = sqlite3.connect(":memory:").cursor()
mem_cur.execute(r"create table a (b text)")
def s3enc(rd: str, fn: str) -> tuple[str, str]:
    """Make a (directory, filename) pair safe for sqlite storage.

    Values that sqlite accepts as-is are returned unchanged; anything it
    rejects (e.g. surrogate escapes) becomes "//" + urlsafe-base64.
    """

    def enc(v: str) -> str:
        try:
            # probe: sqlite raises on strings it cannot bind as text
            mem_cur.execute("select * from a where b = ?", (v,))
            return v
        except:
            wtf8 = v.encode(FS_ENCODING, "surrogateescape")
            return "//" + base64.urlsafe_b64encode(wtf8).decode("ascii")

    return enc(rd), enc(fn)
def main():
    """Replay upload records from a copyparty logfile (stdin) into up2k.db.

    Only log lines carrying a complete upload-json ('"hash": []') are
    applied; entries already present in the db (same wark) are skipped.
    Commits every 1024 inserts and once more at the end.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("db")
    ar = ap.parse_args()

    db = sqlite3.connect(ar.db).cursor()
    ptn_times = re.compile(r"no more chunks, setting times \(([0-9]+)")
    at = 0
    ctr = 0

    for ln in [x.decode("utf-8", "replace").rstrip() for x in sys.stdin.buffer]:
        if "no more chunks, setting times (" in ln:
            m = ptn_times.search(ln)
            if m:
                # remember the upload-time for the json line that follows
                at = int(m.group(1))

        if '"hash": []' in ln:
            try:
                ofs = ln.find("{")
                j = json.loads(ln[ofs:])
            except:
                # bugfix: was `pass`, which fell through and reused the
                # previous line's json (or raised NameError on the first
                # malformed line); skip unparseable lines instead
                continue

            w = j["wark"]
            if db.execute("select w from up where w = ?", (w,)).fetchone():
                continue

            # PYTHONPATH=/home/ed/dev/copyparty/ python3 -m copyparty -e2dsa -v foo:foo:rwmd,ed -aed:wark --no-forget
            # 05:34:43.845 127.0.0.1 42496       no more chunks, setting times (1662528883, 1658001882)
            # 05:34:43.863 127.0.0.1 42496       {"name": "f\"2", "purl": "/foo/bar/baz/", "size": 1674, "lmod": 1658001882, "sprs": true, "hash": [], "wark": "LKIWpp2jEAh9dH3fu-DobuURFGEKlODXDGTpZ1otMhUg"}
            #  | w                                            | mt         | sz   | rd      | fn  | ip        | at         |
            #  | LKIWpp2jEAh9dH3fu-DobuURFGEKlODXDGTpZ1otMhUg | 1658001882 | 1674 | bar/baz | f"2 | 127.0.0.1 | 1662528883 |
            rd, fn = s3enc(j["purl"].strip("/"), j["name"])
            ip = ln.split(" ")[1].split("m")[-1]  # strip ansi color prefix from the ip column

            q = "insert into up values (?,?,?,?,?,?,?)"
            v = (w, int(j["lmod"]), int(j["size"]), rd, fn, ip, at)
            db.execute(q, v)
            ctr += 1
            if ctr % 1024 == 1023:
                print(f"{ctr} commit...")
                db.connection.commit()

    if ctr:
        db.connection.commit()

    print(f"unforgot {ctr} files")


if __name__ == "__main__":
    main()

File diff suppressed because it is too large Load Diff

View File

@@ -22,13 +22,23 @@ however if your copyparty is behind a reverse-proxy, you may want to use [`share
* `URL`: full URL to the root folder (with trailing slash) followed by `$regex:1|1$`
* `pw`: password (remove `Parameters` if anon-write)
### [`media-osd-bgone.ps1`](media-osd-bgone.ps1)
* disables the [windows OSD popup](https://user-images.githubusercontent.com/241032/122821375-0e08df80-d2dd-11eb-9fd9-184e8aacf1d0.png) (the thing on the left) which appears every time you hit media hotkeys to adjust volume or change song while playing music with the copyparty web-ui, or most other audio players really
### [`explorer-nothumbs-nofoldertypes.reg`](explorer-nothumbs-nofoldertypes.reg)
* disables thumbnails and folder-type detection in windows explorer
* makes it way faster (especially for slow/networked locations (such as copyparty-fuse))
* makes it way faster (especially for slow/networked locations (such as partyfuse))
### [`webdav-cfg.reg`](webdav-cfg.bat)
* improves the native webdav support in windows;
* removes the 47.6 MiB filesize limit when downloading from webdav
* optionally enables webdav basic-auth over plaintext http
* optionally helps disable wpad, removing the 10sec latency
### [`cfssl.sh`](cfssl.sh)
* creates CA and server certificates using cfssl
* give a 3rd argument to install it to your copyparty config
* systemd service at [`systemd/cfssl.service`](systemd/cfssl.service)
# OS integration
init-scripts to start copyparty as a service

View File

@@ -0,0 +1,14 @@
# when running copyparty behind a reverse proxy,
# the following arguments are recommended:
#
# -i 127.0.0.1 only accept connections from nginx
#
# if you are doing location-based proxying (such as `/stuff` below)
# you must run copyparty with --rp-loc=stuff
#
# on fedora/rhel, remember to setsebool -P httpd_can_network_connect 1
LoadModule proxy_module modules/mod_proxy.so
ProxyPass "/stuff" "http://127.0.0.1:3923/stuff"
# do not specify ProxyPassReverse
RequestHeader set "X-Forwarded-Proto" expr=%{REQUEST_SCHEME}

View File

@@ -7,7 +7,7 @@ srv_fqdn="$2"
[ -z "$srv_fqdn" ] && {
echo "need arg 1: ca name"
echo "need arg 2: server fqdn"
echo "need arg 2: server fqdn and/or IPs, comma-separated"
echo "optional arg 3: if set, write cert into copyparty cfg"
exit 1
}

View File

@@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<title>🎉 redirect</title>
<title>💾🎉 redirect</title>
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<style>

Binary file not shown.

104
contrib/media-osd-bgone.ps1 Normal file
View File

@@ -0,0 +1,104 @@
# media-osd-bgone.ps1: disable media-control OSD on win10do
# v1.1, 2021-06-25, ed <irc.rizon.net>, MIT-licensed
# https://github.com/9001/copyparty/blob/hovudstraum/contrib/media-osd-bgone.ps1
#
# locates the first window that looks like the media OSD and minimizes it;
# doing this once after each reboot should do the trick
# (adjust the width/height filter if it doesn't work)
#
# ---------------------------------------------------------------------
#
# tip: save the following as "media-osd-bgone.bat" next to this script:
# start cmd /c "powershell -command ""set-executionpolicy -scope process bypass; .\media-osd-bgone.ps1"" & ping -n 2 127.1 >nul"
#
# then create a shortcut to that bat-file and move the shortcut here:
# %appdata%\Microsoft\Windows\Start Menu\Programs\Startup
#
# and now this will autorun on bootup
# compile a small WinForms helper in-process: it scans top-level
# "NativeHWNDHost" windows (the class the media OSD lives in) and
# minimizes the first one whose height matches the OSD (141 px)
Add-Type -TypeDefinition @"
using System;
using System.IO;
using System.Threading;
using System.Diagnostics;
using System.Runtime.InteropServices;
using System.Windows.Forms;

namespace A {
    public class B : Control {
        [DllImport("user32.dll")]
        static extern void keybd_event(byte bVk, byte bScan, uint dwFlags, int dwExtraInfo);

        [DllImport("user32.dll", SetLastError = true)]
        static extern IntPtr FindWindowEx(IntPtr hwndParent, IntPtr hwndChildAfter, string lpszClass, string lpszWindow);

        [DllImport("user32.dll", SetLastError=true)]
        static extern bool GetWindowRect(IntPtr hwnd, out RECT lpRect);

        [DllImport("user32.dll")]
        static extern bool ShowWindow(IntPtr hWnd, int nCmdShow);

        [StructLayout(LayoutKind.Sequential)]
        public struct RECT {
            public int x;
            public int y;
            public int x2;
            public int y2;
        }

        bool fa() {
            RECT r;
            IntPtr it = IntPtr.Zero;
            while ((it = FindWindowEx(IntPtr.Zero, it, "NativeHWNDHost", "")) != IntPtr.Zero) {
                if (FindWindowEx(it, IntPtr.Zero, "DirectUIHWND", "") == IntPtr.Zero)
                    continue;

                if (!GetWindowRect(it, out r))
                    continue;

                int w = r.x2 - r.x + 1;
                int h = r.y2 - r.y + 1;
                Console.WriteLine("[*] hwnd {0:x} @ {1}x{2} sz {3}x{4}", it, r.x, r.y, w, h);

                if (h != 141)
                    continue;

                ShowWindow(it, 6);
                Console.WriteLine("[+] poof");
                return true;
            }
            return false;
        }

        void fb() {
            keybd_event((byte)Keys.VolumeMute, 0, 0, 0);
            keybd_event((byte)Keys.VolumeMute, 0, 2, 0);
            Thread.Sleep(500);

            keybd_event((byte)Keys.VolumeMute, 0, 0, 0);
            keybd_event((byte)Keys.VolumeMute, 0, 2, 0);

            while (true) {
                if (fa()) {
                    break;
                }
                Console.WriteLine("[!] not found");
                Thread.Sleep(1000);
            }

            this.Invoke((MethodInvoker)delegate {
                Application.Exit();
            });
        }

        public void Run() {
            Console.WriteLine("[+] hi");
            new Thread(new ThreadStart(fb)).Start();
            Application.Run();
            Console.WriteLine("[+] bye");
        }
    }
}
"@ -ReferencedAssemblies System.Windows.Forms

# trigger the OSD by tapping volume-mute (net volume change: none),
# then let the background thread find and hide it before exiting
(New-Object -TypeName A.B).Run()

View File

@@ -1,15 +1,16 @@
# when running copyparty behind a reverse proxy,
# the following arguments are recommended:
#
# -nc 512 important, see next paragraph
# --http-only lower latency on initial connection
# -i 127.0.0.1 only accept connections from nginx
#
# -nc must match or exceed the webserver's max number of concurrent clients;
# copyparty default is 1024 if OS permits it (see "max clients:" on startup),
# nginx default is 512 (worker_processes 1, worker_connections 512)
#
# you may also consider adding -j0 for CPU-intensive configurations
# (not that i can really think of any good examples)
# (5'000 requests per second, or 20gbps upload/download in parallel)
#
# on fedora/rhel, remember to setsebool -P httpd_can_network_connect 1
upstream cpp {
server 127.0.0.1:3923;
@@ -37,3 +38,9 @@ server {
proxy_set_header Connection "Keep-Alive";
}
}
# default client_max_body_size (1M) blocks uploads larger than 256 MiB
client_max_body_size 1024M;
client_header_timeout 610m;
client_body_timeout 610m;
send_timeout 610m;

View File

@@ -0,0 +1,281 @@
{ config, pkgs, lib, ... }:
with lib;
let
mkKeyValue = key: value:
if value == true then
# sets with a true boolean value are coerced to just the key name
key
else if value == false then
# or omitted completely when false
""
else
(generators.mkKeyValueDefault { inherit mkValueString; } ": " key value);
mkAttrsString = value: (generators.toKeyValue { inherit mkKeyValue; } value);
mkValueString = value:
if isList value then
(concatStringsSep ", " (map mkValueString value))
else if isAttrs value then
"\n" + (mkAttrsString value)
else
(generators.mkValueStringDefault { } value);
mkSectionName = value: "[" + (escape [ "[" "]" ] value) + "]";
mkSection = name: attrs: ''
${mkSectionName name}
${mkAttrsString attrs}
'';
mkVolume = name: attrs: ''
${mkSectionName name}
${attrs.path}
${mkAttrsString {
accs = attrs.access;
flags = attrs.flags;
}}
'';
passwordPlaceholder = name: "{{password-${name}}}";
accountsWithPlaceholders = mapAttrs (name: attrs: passwordPlaceholder name);
configStr = ''
${mkSection "global" cfg.settings}
${mkSection "accounts" (accountsWithPlaceholders cfg.accounts)}
${concatStringsSep "\n" (mapAttrsToList mkVolume cfg.volumes)}
'';
name = "copyparty";
cfg = config.services.copyparty;
configFile = pkgs.writeText "${name}.conf" configStr;
runtimeConfigPath = "/run/${name}/${name}.conf";
home = "/var/lib/${name}";
defaultShareDir = "${home}/data";
in {
options.services.copyparty = {
enable = mkEnableOption "web-based file manager";
package = mkOption {
type = types.package;
default = pkgs.copyparty;
defaultText = "pkgs.copyparty";
description = ''
Package of the application to run, exposed for overriding purposes.
'';
};
openFilesLimit = mkOption {
default = 4096;
type = types.either types.int types.str;
description = "Number of files to allow copyparty to open.";
};
settings = mkOption {
type = types.attrs;
description = ''
Global settings to apply.
Directly maps to values in the [global] section of the copyparty config.
See `${getExe cfg.package} --help` for more details.
'';
default = {
i = "127.0.0.1";
no-reload = true;
};
example = literalExpression ''
{
i = "0.0.0.0";
no-reload = true;
}
'';
};
accounts = mkOption {
type = types.attrsOf (types.submodule ({ ... }: {
options = {
passwordFile = mkOption {
type = types.str;
description = ''
Runtime file path to a file containing the user password.
Must be readable by the copyparty user.
'';
example = "/run/keys/copyparty/ed";
};
};
}));
description = ''
A set of copyparty accounts to create.
'';
default = { };
example = literalExpression ''
{
ed.passwordFile = "/run/keys/copyparty/ed";
};
'';
};
# shared directories; the attribute name is the URL path of the volume
volumes = mkOption {
  type = types.attrsOf (types.submodule ({ ... }: {
    options = {
      path = mkOption {
        type = types.str;
        description = ''
          Path of a directory to share.
        '';
      };
      access = mkOption {
        type = types.attrs;
        description = ''
          Attribute list of permissions and the users to apply them to.
          The key must be a string containing any combination of allowed permission:
          "r" (read): list folder contents, download files
          "w" (write): upload files; need "r" to see the uploads
          "m" (move): move files and folders; need "w" at destination
          "d" (delete): permanently delete files and folders
          "g" (get): download files, but cannot see folder contents
          "G" (upget): "get", but can see filekeys of their own uploads
          For example: "rwmd"
          The value must be one of:
          an account name, defined in `accounts`
          a list of account names
          "*", which means "any account"
        '';
        example = literalExpression ''
          {
            # wG = write-upget = see your own uploads only
            wG = "*";
            # read-write-modify-delete for users "ed" and "k"
            rwmd = ["ed" "k"];
          };
        '';
      };
      flags = mkOption {
        type = types.attrs;
        description = ''
          Attribute list of volume flags to apply.
          See `${getExe cfg.package} --help-flags` for more details.
        '';
        example = literalExpression ''
          {
            # "fk" enables filekeys (necessary for upget permission) (4 chars long)
            fk = 4;
            # scan for new files every 60sec
            scan = 60;
            # volflag "e2d" enables the uploads database
            e2d = true;
            # "d2t" disables multimedia parsers (in case the uploads are malicious)
            d2t = true;
            # skips hashing file contents if path matches *.iso
            nohash = "\.iso$";
          };
        '';
        default = { };
      };
    };
  }));
  description = "A set of copyparty volumes to create";
  # default: share defaultShareDir read-only with everyone
  default = {
    "/" = {
      path = defaultShareDir;
      access = { r = "*"; };
    };
  };
  # NOTE(review): `${defaultShareDir}` below sits inside an ''-string, so it
  # interpolates the actual path (unquoted) into the rendered example text;
  # it looks like it was meant to be shown literally -- confirm
  example = literalExpression ''
    {
      "/" = {
        path = ${defaultShareDir};
        access = {
          # wG = write-upget = see your own uploads only
          wG = "*";
          # read-write-modify-delete for users "ed" and "k"
          rwmd = ["ed" "k"];
        };
      };
    };
  '';
};
};
# realize the options above as a hardened systemd service plus a
# dedicated system user/group
config = mkIf cfg.enable {
  systemd.services.copyparty = {
    description = "http file sharing hub";
    wantedBy = [ "multi-user.target" ];
    environment = {
      # unbuffered output so log lines reach the journal in realtime
      PYTHONUNBUFFERED = "true";
      # copyparty keeps its certificates/state under XDG_CONFIG_HOME
      XDG_CONFIG_HOME = "${home}/.config";
    };
    # copy the nix-store config into the runtime dir, then splice every
    # account password (read from its passwordFile) over its placeholder
    preStart = let
      replaceSecretCommand = name: attrs:
        "${getExe pkgs.replace-secret} '${
          passwordPlaceholder name
        }' '${attrs.passwordFile}' ${runtimeConfigPath}";
    in ''
      set -euo pipefail
      install -m 600 ${configFile} ${runtimeConfigPath}
      ${concatStringsSep "\n"
      (mapAttrsToList replaceSecretCommand cfg.accounts)}
    '';
    serviceConfig = {
      Type = "simple";
      ExecStart = "${getExe cfg.package} -c ${runtimeConfigPath}";
      # Hardening options
      User = "copyparty";
      Group = "copyparty";
      RuntimeDirectory = name;
      RuntimeDirectoryMode = "0700";
      StateDirectory = [ name "${name}/data" "${name}/.config" ];
      StateDirectoryMode = "0700";
      WorkingDirectory = home;
      # start from an empty read-only root; only the paths listed below
      # are bind-mounted back in
      TemporaryFileSystem = "/:ro";
      BindReadOnlyPaths = [
        "/nix/store"
        "-/etc/resolv.conf"
        "-/etc/nsswitch.conf"
        "-/etc/hosts"
        "-/etc/localtime"
      ] ++ (mapAttrsToList (k: v: "-${v.passwordFile}") cfg.accounts);
      # writable paths: the state dir plus every shared volume
      BindPaths = [ home ] ++ (mapAttrsToList (k: v: v.path) cfg.volumes);
      # Would re-mount paths ignored by temporary root
      #ProtectSystem = "strict";
      ProtectHome = true;
      PrivateTmp = true;
      PrivateDevices = true;
      ProtectKernelTunables = true;
      ProtectControlGroups = true;
      RestrictSUIDSGID = true;
      PrivateMounts = true;
      ProtectKernelModules = true;
      ProtectKernelLogs = true;
      ProtectHostname = true;
      ProtectClock = true;
      ProtectProc = "invisible";
      ProcSubset = "pid";
      RestrictNamespaces = true;
      RemoveIPC = true;
      UMask = "0077";
      # configurable via the openFilesLimit option above
      LimitNOFILE = cfg.openFilesLimit;
      NoNewPrivileges = true;
      LockPersonality = true;
      RestrictRealtime = true;
    };
  };
  users.groups.copyparty = { };
  users.users.copyparty = {
    description = "Service user for copyparty";
    group = "copyparty";
    home = home;
    isSystemUser = true;
  };
};
}

View File

@@ -14,5 +14,5 @@ name="$SVCNAME"
command_background=true
pidfile="/var/run/$SVCNAME.pid"
command="/usr/bin/python /usr/local/bin/copyparty-sfx.py"
command="/usr/bin/python3 /usr/local/bin/copyparty-sfx.py"
command_args="-q -v /mnt::rw"

View File

@@ -0,0 +1,57 @@
# Maintainer: icxes <dev.null@need.moe>
pkgname=copyparty
pkgver="1.6.13"
pkgrel=1
pkgdesc="Portable file sharing hub"
arch=("any")
url="https://github.com/9001/${pkgname}"
license=('MIT')
depends=("python" "lsof")
optdepends=("ffmpeg: thumbnails for videos, images (slower) and audio, music tags"
"python-jinja: faster html generator"
"python-mutagen: music tags (alternative)"
"python-pillow: thumbnails for images"
"python-pyvips: thumbnails for images (higher quality, faster, uses more ram)"
"libkeyfinder-git: detection of musical keys"
"qm-vamp-plugins: BPM detection"
"python-pyopenssl: ftps functionality"
"python-impacket-git: smb support (bad idea)"
)
source=("${url}/releases/download/v${pkgver}/${pkgname}-sfx.py"
"${pkgname}.conf"
"${pkgname}.service"
"prisonparty.service"
"index.md"
"https://raw.githubusercontent.com/9001/${pkgname}/v${pkgver}/bin/prisonparty.sh"
"https://raw.githubusercontent.com/9001/${pkgname}/v${pkgver}/LICENSE"
)
backup=("etc/${pkgname}.d/init" )
sha256sums=("96b47616c6e671eb2f4a563671726824eaea201b38d138543c8ddd0ae71797bc"
"b8565eba5e64dedba1cf6c7aac7e31c5a731ed7153d6810288a28f00a36c28b2"
"f65c207e0670f9d78ad2e399bda18d5502ff30d2ac79e0e7fc48e7fbdc39afdc"
"c4f396b083c9ec02ad50b52412c84d2a82be7f079b2d016e1c9fad22d68285ff"
"dba701de9fd584405917e923ea1e59dbb249b96ef23bad479cf4e42740b774c8"
"8e89d281483e22d11d111bed540652af35b66af6f14f49faae7b959f6cdc6475"
"cb2ce3d6277bf2f5a82ecf336cc44963bc6490bcf496ffbd75fc9e21abaa75f3"
)
# install the sfx, helper scripts, config skeleton and systemd units into
# $pkgdir, then print first-time setup hints (see NOTE below)
package() {
    cd "${srcdir}/"
    install -dm755 "${pkgdir}/etc/${pkgname}.d"
    install -Dm755 "${pkgname}-sfx.py" "${pkgdir}/usr/bin/${pkgname}"
    install -Dm755 "prisonparty.sh" "${pkgdir}/usr/bin/prisonparty"
    install -Dm644 "${pkgname}.conf" "${pkgdir}/etc/${pkgname}.d/init"
    install -Dm644 "${pkgname}.service" "${pkgdir}/usr/lib/systemd/system/${pkgname}.service"
    install -Dm644 "prisonparty.service" "${pkgdir}/usr/lib/systemd/system/prisonparty.service"
    install -Dm644 "index.md" "${pkgdir}/var/lib/${pkgname}-jail/README.md"
    install -Dm644 "LICENSE" "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
    # NOTE(review): this inspects the BUILD HOST's /etc at packaging time to
    # decide whether to print the hints; post-install messages conventionally
    # belong in a .install file -- confirm this is intentional
    find /etc/${pkgname}.d -iname '*.conf' 2>/dev/null | grep -qE . && return
    echo "┏━━━━━━━━━━━━━━━──-"
    echo "┃ Configure ${pkgname} by adding .conf files into /etc/${pkgname}.d/"
    echo "┃ and maybe copy+edit one of the following to /etc/systemd/system/:"
    echo "┣━♦ /usr/lib/systemd/system/${pkgname}.service (standard)"
    echo "┣━♦ /usr/lib/systemd/system/prisonparty.service (chroot)"
    echo "┗━━━━━━━━━━━━━━━──-"
}

View File

@@ -0,0 +1,7 @@
## import all *.conf files from the current folder (/etc/copyparty.d)
% ./
# add additional .conf files to this folder;
# see example config files for reference:
# https://github.com/9001/copyparty/blob/hovudstraum/docs/example.conf
# https://github.com/9001/copyparty/tree/hovudstraum/docs/copyparty.d

View File

@@ -0,0 +1,32 @@
# this will start `/usr/bin/copyparty-sfx.py`
# and read config from `/etc/copyparty.d/*.conf`
#
# you probably want to:
# change "User=cpp" and "/home/cpp/" to another user
#
# unless you add -q to disable logging, you may want to remove the
# following line to allow buffering (slightly better performance):
# Environment=PYTHONUNBUFFERED=x
[Unit]
Description=copyparty file server
[Service]
Type=notify
SyslogIdentifier=copyparty
Environment=PYTHONUNBUFFERED=x
WorkingDirectory=/var/lib/copyparty-jail
ExecReload=/bin/kill -s USR1 $MAINPID
# user to run as + where the TLS certificate is (if any)
User=cpp
Environment=XDG_CONFIG_HOME=/home/cpp/.config
# stop systemd-tmpfiles-clean.timer from deleting copyparty while it's running
ExecStartPre=+/bin/bash -c 'mkdir -p /run/tmpfiles.d/ && echo "x /tmp/pe-copyparty*" > /run/tmpfiles.d/copyparty.conf'
# run copyparty
ExecStart=/usr/bin/python3 /usr/bin/copyparty -c /etc/copyparty.d/init
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,3 @@
this is `/var/lib/copyparty-jail`, the fallback webroot when copyparty has not yet been configured
please add some `*.conf` files to `/etc/copyparty.d/`

View File

@@ -0,0 +1,31 @@
# this will start `/usr/bin/copyparty-sfx.py`
# in a chroot, preventing accidental access elsewhere
# and read config from `/etc/copyparty.d/*.conf`
#
# expose additional filesystem locations to copyparty
# by listing them between the last `1000` and `--`
#
# `1000 1000` = what user to run copyparty as
#
# unless you add -q to disable logging, you may want to remove the
# following line to allow buffering (slightly better performance):
# Environment=PYTHONUNBUFFERED=x
[Unit]
Description=copyparty file server
[Service]
SyslogIdentifier=prisonparty
Environment=PYTHONUNBUFFERED=x
WorkingDirectory=/var/lib/copyparty-jail
ExecReload=/bin/kill -s USR1 $MAINPID
# stop systemd-tmpfiles-clean.timer from deleting copyparty while it's running
ExecStartPre=+/bin/bash -c 'mkdir -p /run/tmpfiles.d/ && echo "x /tmp/pe-copyparty*" > /run/tmpfiles.d/copyparty.conf'
# run copyparty
ExecStart=/bin/bash /usr/bin/prisonparty /var/lib/copyparty-jail 1000 1000 /etc/copyparty.d -- \
/usr/bin/python3 /usr/bin/copyparty -c /etc/copyparty.d/init
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,55 @@
# build copyparty from the pinned upstream sfx release, wrapped with a
# python environment whose contents are selected by the feature flags below
# NOTE(review): pyftpdlib is accepted as an argument but never added to
# pyEnv -- confirm whether plain-FTP support should pull it in
{ lib, stdenv, makeWrapper, fetchurl, utillinux, python, jinja2, impacket, pyftpdlib, pyopenssl, pillow, pyvips, ffmpeg, mutagen,
  # create thumbnails with Pillow; faster than FFmpeg / MediaProcessing
  withThumbnails ? true,
  # create thumbnails with PyVIPS; even faster, uses more memory
  # -- can be combined with Pillow to support more filetypes
  withFastThumbnails ? false,
  # enable FFmpeg; thumbnails for most filetypes (also video and audio), extract audio metadata, transcode audio to opus
  # -- possibly dangerous if you allow anonymous uploads, since FFmpeg has a huge attack surface
  # -- can be combined with Thumbnails and/or FastThumbnails, since FFmpeg is slower than both
  withMediaProcessing ? true,
  # if MediaProcessing is not enabled, you probably want this instead (less accurate, but much safer and faster)
  withBasicAudioMetadata ? false,
  # enable FTPS support in the FTP server
  withFTPS ? false,
  # samba/cifs server; dangerous and buggy, enable if you really need it
  withSMB ? false,
}:

let
  # release url/version/hash pinned in pin.json (written by the update script)
  pinData = lib.importJSON ./pin.json;
  # NOTE(review): inside `with ps;` the explicit function argument `ffmpeg`
  # (the non-python ffmpeg package) shadows any ps.ffmpeg, so the ffmpeg
  # binary package lands inside python.withPackages -- confirm intent
  pyEnv = python.withPackages (ps:
    with ps; [
      jinja2
    ]
    ++ lib.optional withSMB impacket
    ++ lib.optional withFTPS pyopenssl
    ++ lib.optional withThumbnails pillow
    ++ lib.optional withFastThumbnails pyvips
    ++ lib.optional withMediaProcessing ffmpeg
    ++ lib.optional withBasicAudioMetadata mutagen
  );
in stdenv.mkDerivation {
  pname = "copyparty";
  version = pinData.version;

  src = fetchurl {
    url = pinData.url;
    hash = pinData.hash;
  };

  buildInputs = [ makeWrapper ];

  # the sfx is a ready-to-run single file; nothing to unpack or compile
  dontUnpack = true;
  dontBuild = true;

  installPhase = ''
    install -Dm755 $src $out/share/copyparty-sfx.py
    makeWrapper ${pyEnv.interpreter} $out/bin/copyparty \
      --set PATH '${lib.makeBinPath ([ utillinux ] ++ lib.optional withMediaProcessing ffmpeg)}:$PATH' \
      --add-flags "$out/share/copyparty-sfx.py"
  '';
}

View File

@@ -0,0 +1,5 @@
{
"url": "https://github.com/9001/copyparty/releases/download/v1.6.13/copyparty-sfx.py",
"version": "1.6.13",
"hash": "sha256-lrR2FsbmcesvSlY2cXJoJOrqIBs40ThUPI3dCucXl7w="
}

View File

@@ -0,0 +1,77 @@
#!/usr/bin/env python3
# Update the Nix package pin
#
# Usage: ./update.sh [PATH]
# When the [PATH] is not set, it will fetch the latest release from the repo.
# With [PATH] set, it will hash the given file and generate the URL,
# based on the version contained within the file
import base64
import hashlib
import json
import re
import sys
from pathlib import Path

# where the generated pin is written (consumed by default.nix)
OUTPUT_FILE = Path("pin.json")
# name of the release asset that gets downloaded and hashed
TARGET_ASSET = "copyparty-sfx.py"
# algorithm used for the SRI-style "<algo>-<base64>" hash in the pin
HASH_TYPE = "sha256"
LATEST_RELEASE_URL = "https://api.github.com/repos/9001/copyparty/releases/latest"


def DOWNLOAD_URL(version: str) -> str:
    """Return the GitHub release download URL for *version*.

    A def instead of an assigned lambda (PEP 8 E731); the callable
    interface is unchanged.
    """
    return f"https://github.com/9001/copyparty/releases/download/v{version}/{TARGET_ASSET}"


def get_formatted_hash(binary: bytes) -> str:
    """Return the SRI-style hash ("sha256-<base64>") of *binary*.

    Uses HASH_TYPE for the digest as well as the prefix, so the algorithm
    name and the actual hash can no longer drift apart (the original
    hard-coded "sha256" in hashlib.new but formatted with HASH_TYPE).
    """
    hasher = hashlib.new(HASH_TYPE)
    hasher.update(binary)
    asset_hash = hasher.digest()
    encoded_hash = base64.b64encode(asset_hash).decode("ascii")
    return f"{HASH_TYPE}-{encoded_hash}"
def version_from_sfx(binary: bytes) -> str:
    """Extract the copyparty version embedded in an sfx file.

    The sfx contains a line of the form `VER = "1.2.3"`; raises
    ValueError when no such line exists.
    """
    result = re.search(rb'^VER = "(.*)"$', binary, re.MULTILINE)
    if result:
        # group(1) is the quoted version; the original `groups(1)[0]`
        # worked only by abusing groups()' default-value argument
        return result.group(1).decode("ascii")
    raise ValueError("version not found in provided file")
def remote_release_pin():
    """Build a pin dict (url/version/hash) for the latest GitHub release."""
    # local import: requests is only needed for this network path
    import requests

    response = requests.get(LATEST_RELEASE_URL).json()
    # release tags look like "v1.6.13"; the pin stores the bare version
    version = response["tag_name"].lstrip("v")
    # pick the sfx asset out of the release's asset list
    asset_info = [a for a in response["assets"] if a["name"] == TARGET_ASSET][0]
    download_url = asset_info["browser_download_url"]
    asset = requests.get(download_url)
    formatted_hash = get_formatted_hash(asset.content)
    result = {"url": download_url, "version": version, "hash": formatted_hash}
    return result
def local_release_pin(path):
    """Build a pin dict (url/version/hash) for a local sfx file.

    The version is read out of the file itself and the download URL is
    derived from it.
    """
    blob = path.read_bytes()
    ver = version_from_sfx(blob)
    return {
        "url": DOWNLOAD_URL(ver),
        "version": ver,
        "hash": get_formatted_hash(blob),
    }
def main():
    """Pin a local sfx file when a path argument is given, else pin the
    latest remote release; print the result and write it to pin.json."""
    if len(sys.argv) > 1:
        asset_path = Path(sys.argv[1])
        result = local_release_pin(asset_path)
    else:
        result = remote_release_pin()
    print(result)
    json_result = json.dumps(result, indent=4)
    OUTPUT_FILE.write_text(json_result)


if __name__ == "__main__":
    main()

View File

@@ -11,6 +11,15 @@ save one of these as `.epilogue.html` inside a folder to customize it:
## example browser-js
point `--js-browser` to one of these by URL:
* [`minimal-up2k.js`](minimal-up2k.js) is similar to the above `minimal-up2k.html` except it applies globally to all write-only folders
* [`up2k-hooks.js`](up2k-hooks.js) lets you specify a ruleset for files to skip uploading
* [`up2k-hook-ytid.js`](up2k-hook-ytid.js) is a more specific example checking youtube-IDs against some API
## example browser-css
point `--css-browser` to one of these by URL:

View File

@@ -1,13 +1,22 @@
<!--
NOTE: DEPRECATED; please use the javascript version instead:
https://github.com/9001/copyparty/blob/hovudstraum/contrib/plugins/minimal-up2k.js
----
save this as .epilogue.html inside a write-only folder to declutter the UI, makes it look like
https://user-images.githubusercontent.com/241032/118311195-dd6ca380-b4ef-11eb-86f3-75a3ff2e1332.png
only works if you disable the prologue/epilogue sandbox with --no-sb-lg
which should probably be combined with --no-dot-ren to prevent damage
(`no_sb_lg` can also be set per-volume with volflags)
-->
<style>
/* make the up2k ui REALLY minimal by hiding a bunch of stuff: */
#ops, #tree, #path, #wrap>h2:last-child, /* main tabs and navigators (tree/breadcrumbs) */
#ops, #tree, #path, #wfp, /* main tabs and navigators (tree/breadcrumbs) */
#u2conf tr:first-child>td[rowspan]:not(#u2btn_cw), /* most of the config options */

View File

@@ -0,0 +1,59 @@
/*
makes the up2k ui REALLY minimal by hiding a bunch of stuff
almost the same as minimal-up2k.html except this one...:
-- applies to every write-only folder when used with --js-browser
-- only applies if javascript is enabled
-- doesn't hide the total upload ETA display
-- looks slightly better
*/
var u2min = `
<style>
#ops, #path, #tree, #files, #wfp,
#u2conf td.c+.c, #u2cards, #srch_dz, #srch_zd {
display: none !important;
}
#u2conf {margin:5em auto 0 auto !important}
#u2conf.ww {width:70em}
#u2conf.w {width:50em}
#u2conf.w .c,
#u2conf.w #u2btn_cw {text-align:left}
#u2conf.w #u2btn_cw {width:70%}
#u2etaw {margin:3em auto}
#u2etaw.w {
text-align: center;
margin: -3.5em auto 5em auto;
}
#u2etaw.w #u2etas {margin-right:-37em}
#u2etaw.w #u2etas.o {margin-top:-2.2em}
#u2etaw.ww {margin:-1em auto}
#u2etaw.ww #u2etas {padding-left:4em}
#u2etas {
background: none !important;
border: none !important;
}
#wrap {margin-left:2em !important}
.logue {
border: none !important;
margin: 2em auto !important;
}
.logue:before {content:'' !important}
</style>
<a href="#" onclick="this.parentNode.innerHTML='';">show advanced options</a>
`;
// inject the decluttering markup only for users without read access
// (i.e. write-only folders); `has`, `perms`, `mknod`, `ebi` and `QS`
// are copyparty browser globals
if (!has(perms, 'read')) {
    var e2 = mknod('div');
    e2.innerHTML = u2min;
    ebi('wrap').insertBefore(e2, QS('#wfp'));
}

208
contrib/plugins/rave.js Normal file
View File

@@ -0,0 +1,208 @@
/* untz untz untz untz */
(function () {
var can, ctx, W, H, fft, buf, bars, barw, pv,
hue = 0,
ibeat = 0,
beats = [9001],
beats_url = '',
uofs = 0,
ops = ebi('ops'),
raving = false,
recalc = 0,
cdown = 0,
FC = 0.9,
css = `<style>
#fft {
position: fixed;
top: 0;
left: 0;
z-index: -1;
}
body {
box-shadow: inset 0 0 0 white;
}
#ops>a,
#path>a {
display: inline-block;
}
/*
body.untz {
animation: untz-body 200ms ease-out;
}
@keyframes untz-body {
0% {inset 0 0 20em white}
100% {inset 0 0 0 white}
}
*/
:root, html.a, html.b, html.c, html.d, html.e {
--row-alt: rgba(48,52,78,0.2);
}
#files td {
background: none;
}
</style>`;
QS('body').appendChild(mknod('div', null, css));
// audio-plugin hook: create the spectrum canvas + FFT analyser for the
// track that just started, then fetch its beatmap (.txt of timestamps)
function rave_load() {
    console.log('rave_load');
    can = mknod('canvas', 'fft');
    QS('body').appendChild(can);
    ctx = can.getContext('2d');
    // tap the shared AudioContext (actx, provided by copyparty) with an analyser
    fft = new AnalyserNode(actx, {
        "fftSize": 2048,
        "maxDecibels": 0,
        "smoothingTimeConstant": 0.7,
    });
    ibeat = 0;
    beats = [9001];
    buf = new Uint8Array(fft.frequencyBinCount);
    bars = buf.length * FC;
    afilt.filters.push(fft);
    if (!raving) {
        raving = true;
        raver();
    }
    // beatmap lives next to the song: <dir>/.beats/<filename>.txt
    beats_url = mp.au.src.split('?')[0].replace(/(.*\/)(.*)/, '$1.beats/$2.txt');
    console.log("reading beats from", beats_url);
    var xhr = new XHR();
    xhr.open('GET', beats_url, true);
    xhr.onload = readbeats;
    // remember which track this request belongs to (checked in readbeats)
    xhr.url = beats_url;
    xhr.send();
}
// audio-plugin hook: remove the canvas; raver() notices can==null and stops
function rave_unload() {
    qsr('#fft');
    can = null;
}
// XHR callback: parse the beatmap (one float timestamp per line) and
// extrapolate the final beat interval forward so the pulse keeps going
function readbeats() {
    // ignore late responses for a previously playing track
    if (this.url != beats_url)
        return console.log('old beats??', this.url, beats_url);
    var sbeats = this.responseText.replace(/\r/g, '').split(/\n/g);
    if (sbeats.length < 3)
        return;
    beats = [];
    for (var a = 0; a < sbeats.length; a++)
        beats.push(parseFloat(sbeats[a]));
    // continue the last observed interval until t=1200s
    var end = beats.slice(-2),
        t = end[1],
        d = t - end[0];
    while (d > 0.1 && t < 1200)
        beats.push(t += d);
}
function hrand() {
return Math.random() - 0.5;
}
// per-frame render loop; reschedules itself until rave_unload clears `can`
function raver() {
    if (!can) {
        raving = false;
        return;
    }
    requestAnimationFrame(raver);
    if (!mp || !mp.au || mp.au.paused)
        return;
    // while uofs > 0 (set by untz), shake the UI a little less each frame
    if (--uofs >= 0) {
        document.body.style.marginLeft = hrand() * uofs + 'px';
        ebi('tree').style.marginLeft = hrand() * uofs + 'px';
        for (var a of QSA('#ops>a, #path>a, #pctl>a'))
            a.style.transform = 'translate(' + hrand() * uofs * 1 + 'px, ' + hrand() * uofs * 0.7 + 'px) rotate(' + Math.random() * uofs * 0.7 + 'deg)'
    }
    // re-measure the layout roughly once per second (every 60 frames)
    if (--recalc < 0) {
        recalc = 60;
        var tree = ebi('tree'),
            x = tree.style.display == 'none' ? 0 : tree.offsetWidth;
        //W = can.width = window.innerWidth - x;
        //H = can.height = window.innerHeight;
        //H = ebi('widget').offsetTop;
        W = can.width = bars;
        H = can.height = 512;
        barw = 1; //parseInt(0.8 + W / bars);
        can.style.left = x + 'px';
        can.style.width = (window.innerWidth - x) + 'px';
        can.style.height = ebi('widget').offsetTop + 'px';
    }
    //if (--cdown == 1)
    //    clmod(ops, 'untz');
    fft.getByteFrequencyData(buf);
    // strongest low-frequency bin drives the bar hue (smoothed over time)
    var imax = 0, vmax = 0;
    for (var a = 10; a < 50; a++)
        if (vmax < buf[a]) {
            vmax = buf[a];
            imax = a;
        }
    hue = hue * 0.93 + imax * 0.07;
    ctx.fillStyle = 'rgba(0,0,0,0)';
    ctx.fillRect(0, 0, W, H);
    ctx.clearRect(0, 0, W, H);
    ctx.fillStyle = 'hsla(' + (hue * 2.5) + ',100%,50%,0.7)';
    // draw the spectrum bars, boosting the higher frequencies slightly
    var x = 0, mul = (H / 256) * 0.5;
    for (var a = 0; a < buf.length * FC; a++) {
        var v = buf[a] * mul * (1 + 0.69 * a / buf.length);
        ctx.fillRect(x, H - v, barw, v);
        x += barw;
    }
    // fire untz() when playback crosses the next beat timestamp
    // (50ms lookahead); consume all beats up to that point
    var t = mp.au.currentTime + 0.05;
    if (ibeat >= beats.length || beats[ibeat] > t)
        return;
    while (ibeat < beats.length && beats[ibeat++] < t)
        continue;
    return untz();
    // dead code below the return: legacy energy-spike beat detection,
    // apparently kept for reference
    var cv = 0;
    for (var a = 0; a < 128; a++)
        cv += buf[a];
    if (cv - pv > 1000) {
        console.log(pv, cv, cv - pv);
        if (cdown < 0) {
            clmod(ops, 'untz', 1);
            cdown = 20;
        }
    }
    pv = cv;
}
// one beat: start the UI shake (uofs frames) and flash an inset box-shadow
function untz() {
    console.log('untz');
    uofs = 14;
    document.body.animate([
        { boxShadow: 'inset 0 0 1em #f0c' },
        { boxShadow: 'inset 0 0 20em #f0c', offset: 0.2 },
        { boxShadow: 'inset 0 0 0 #f0c' },
    ], { duration: 200, iterations: 1 });
}
// register with copyparty's audio-filter framework;
// load/unload fire on track change
afilt.plugs.push({
    "en": true,
    "load": rave_load,
    "unload": rave_unload
});
})();

View File

@@ -0,0 +1,297 @@
// way more specific example --
// assumes all files dropped into the uploader have a youtube-id somewhere in the filename,
// locates the youtube-ids and passes them to an API which returns a list of IDs which should be uploaded
//
// also tries to find the youtube-id in the embedded metadata
//
// assumes copyparty is behind nginx as /ytq is a standalone service which must be rproxied in place

// synchronous up2k hook entrypoint; file-search mode passes straight
// through, otherwise the async worker below takes over
function up2k_namefilter(good_files, nil_files, bad_files, hooks) {
    var passthru = up2k.uc.fsearch;
    if (passthru)
        return hooks[0](good_files, nil_files, bad_files, hooks.slice(1));

    a_up2k_namefilter(good_files, nil_files, bad_files, hooks).then(() => { });
}
// ebi('op_up2k').appendChild(mknod('input','unick'));
function bstrpos(buf, ptn) {
var ofs = 0,
ch0 = ptn[0],
sz = buf.byteLength;
while (true) {
ofs = buf.indexOf(ch0, ofs);
if (ofs < 0 || ofs >= sz)
return -1;
for (var a = 1; a < ptn.length; a++)
if (buf[ofs + a] !== ptn[a])
break;
if (a === ptn.length)
return ofs;
++ofs;
}
}
// async worker: collect youtube-IDs from filenames (and, for media files,
// from embedded metadata), ask the /ytq service which IDs are wanted, and
// let the user upload just those files (or override and upload everything)
async function a_up2k_namefilter(good_files, nil_files, bad_files, hooks) {
    var t0 = Date.now(),
        yt_ids = new Set(),
        textdec = new TextDecoder('latin1'),
        md_ptn = new TextEncoder().encode('youtube.com/watch?v='),
        file_ids = [], // all IDs found for each good_files
        md_only = [], // `${id} ${fn}` where ID was only found in metadata
        mofs = 0,
        mnchk = 0,
        mfile = '',
        myid = localStorage.getItem('ytid_t0');

    // persistent per-browser id; used to scope the upload subdirs below
    if (!myid)
        localStorage.setItem('ytid_t0', myid = Date.now());

    for (var a = 0; a < good_files.length; a++) {
        var [fobj, name] = good_files[a],
            cname = name, // will clobber
            sz = fobj.size,
            ids = [],
            fn_ids = [],
            md_ids = [],
            id_ok = false,
            m;

        // all IDs found in this file
        file_ids.push(ids);

        // look for ID in filename; reduce the
        // metadata-scan intensity if the id looks safe
        m = /[\[(-]([\w-]{11})[\])]?\.(?:mp4|webm|mkv|flv|opus|ogg|mp3|m4a|aac)$/i.exec(name);
        id_ok = !!m;

        while (true) {
            // fuzzy catch-all;
            // some ytdl fork did %(title)-%(id).%(ext) ...
            m = /(?:^|[^\w])([\w-]{11})(?:$|[^\w-])/.exec(cname);
            if (!m)
                break;

            cname = cname.replace(m[1], '');
            yt_ids.add(m[1]);
            fn_ids.unshift(m[1]);
        }

        // look for IDs in video metadata
        if (/\.(mp4|webm|mkv|flv|opus|ogg|mp3|m4a|aac)$/i.exec(name)) {
            toast.show('inf r', 0, `analyzing file ${a + 1} / ${good_files.length} :\n${name}\n\nhave analysed ${++mnchk} files in ${(Date.now() - t0) / 1000} seconds, ${humantime((good_files.length - (a + 1)) * (((Date.now() - t0) / 1000) / mnchk))} remaining,\n\nbiggest offset so far is ${mofs}, in this file:\n\n${mfile}`);

            // check first and last 128 MiB;
            // pWxOroN5WCo.mkv @ 6edb98 (6.92M)
            // Nf-nN1wF5Xo.mp4 @ 4a98034 (74.6M)
            var chunksz = 1024 * 1024 * 2, // byte
                aspan = id_ok ? 128 : 512; // MiB

            // clamp the scan window to the file size, chunk-aligned
            aspan = parseInt(Math.min(sz / 2, aspan * 1024 * 1024) / chunksz) * chunksz;
            if (!aspan)
                aspan = Math.min(sz, chunksz);

            // side 0 = head of the file, side 1 = tail
            for (var side = 0; side < 2; side++) {
                var ofs = side ? Math.max(0, sz - aspan) : 0,
                    nchunks = aspan / chunksz;

                for (var chunk = 0; chunk < nchunks; chunk++) {
                    var bchunk = await fobj.slice(ofs, ofs + chunksz + 16).arrayBuffer(),
                        uchunk = new Uint8Array(bchunk, 0, bchunk.byteLength),
                        bofs = bstrpos(uchunk, md_ptn),
                        absofs = Math.min(ofs + bofs, (sz - ofs) + bofs),
                        txt = bofs < 0 ? '' : textdec.decode(uchunk.subarray(bofs)),
                        m;

                    //console.log(`side ${ side }, chunk ${ chunk }, ofs ${ ofs }, bchunk ${ bchunk.byteLength }, txt ${ txt.length }`);
                    while (true) {
                        // mkv/webm have [a-z] immediately after url
                        m = /(youtube\.com\/watch\?v=[\w-]{11})/.exec(txt);
                        if (!m)
                            break;

                        txt = txt.replace(m[1], '');
                        m = m[1].slice(-11);
                        console.log(`found ${m} @${bofs}, ${name} `);

                        yt_ids.add(m);
                        if (!has(fn_ids, m) && !has(md_ids, m)) {
                            md_ids.push(m);
                            md_only.push(`${m} ${name}`);
                        }
                        else
                            // id appears several times; make it preferred
                            md_ids.unshift(m);

                        // bail after next iteration
                        chunk = nchunks - 1;
                        side = 9;

                        if (mofs < absofs) {
                            mofs = absofs;
                            mfile = name;
                        }
                    }

                    ofs += chunksz;
                    if (ofs >= sz)
                        break;
                }
            }
        }

        // metadata IDs first, then any filename IDs not already present
        for (var yi of md_ids)
            ids.push(yi);

        for (var yi of fn_ids)
            if (!has(ids, yi))
                ids.push(yi);
    }

    if (md_only.length)
        console.log('recovered the following youtube-IDs by inspecting metadata:\n\n' + md_only.join('\n'));
    else if (yt_ids.size)
        console.log('did not discover any additional youtube-IDs by inspecting metadata; all the IDs also existed in the filenames');
    else
        console.log('failed to find any youtube-IDs at all, sorry');

    // dead branch: analysis-only mode, toggled by hand when debugging
    if (false) {
        var msg = `finished analysing ${mnchk} files in ${(Date.now() - t0) / 1000} seconds,\n\nbiggest offset was ${mofs} in this file:\n\n${mfile}`,
            mfun = function () { toast.ok(0, msg); };

        mfun();
        setTimeout(mfun, 200);

        return hooks[0]([], [], [], hooks.slice(1));
    }

    // optional nickname field (see the setInterval at the bottom of this file)
    var el = ebi('unick'), unick = el ? el.value : '';
    if (unick) {
        console.log(`sending uploader nickname [${unick}]`);
        fetch(document.location, {
            method: 'POST',
            headers: { 'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8' },
            body: 'msg=' + encodeURIComponent(unick)
        });
    }

    // ask the backend which of the discovered IDs it wants
    toast.inf(5, `running query for ${yt_ids.size} youtube-IDs...`);

    var xhr = new XHR();
    xhr.open('POST', '/ytq', true);
    xhr.setRequestHeader('Content-Type', 'text/plain');
    xhr.onload = xhr.onerror = function () {
        if (this.status != 200)
            return toast.err(0, `sorry, database query failed ;_;\n\nplease let us know so we can look at it, thx!!\n\nerror ${this.status}: ${(this.response && this.response.err) || this.responseText}`);

        process_id_list(this.responseText);
    };
    xhr.send(Array.from(yt_ids).join('\n'));

    // response handler: one wanted ID per line; filters good_files down
    // to the wanted ones and rewrites their upload paths
    function process_id_list(txt) {
        var wanted_ids = new Set(txt.trim().split('\n')),
            name_id = {},
            wanted_names = new Set(), // basenames with a wanted ID -- not including relpath
            wanted_names_scoped = {}, // basenames with a wanted ID -> list of dirs to search under
            wanted_files = new Set(); // filedrops

        for (var a = 0; a < good_files.length; a++) {
            var name = good_files[a][1];
            for (var b = 0; b < file_ids[a].length; b++)
                if (wanted_ids.has(file_ids[a][b])) {
                    // let the next stage handle this to prevent dupes
                    //wanted_files.add(good_files[a]);

                    var m = /(.*)\.(mp4|webm|mkv|flv|opus|ogg|mp3|m4a|aac)$/i.exec(name);
                    if (!m)
                        continue;

                    var [rd, fn] = vsplit(m[1]);
                    if (fn in wanted_names_scoped)
                        wanted_names_scoped[fn].push(rd);
                    else
                        wanted_names_scoped[fn] = [rd];

                    wanted_names.add(fn);
                    name_id[m[1]] = file_ids[a][b];

                    break;
                }
        }

        // add all files with the same basename as each explicitly wanted file
        // (infojson/chatlog/etc when ID was discovered from metadata)
        for (var a = 0; a < good_files.length; a++) {
            var [rd, name] = vsplit(good_files[a][1]);
            for (var b = 0; b < 3; b++) {
                // peel off up to 3 extensions (.info.json etc)
                name = name.replace(/\.[^\.]+$/, '');
                if (!wanted_names.has(name))
                    continue;

                var vid_fp = false;
                for (var c of wanted_names_scoped[name])
                    if (rd.startsWith(c))
                        vid_fp = c + name;

                if (!vid_fp)
                    continue;

                // upload into v<first-char>/<id>-<browser-id>/
                var subdir = name_id[vid_fp];
                subdir = `v${subdir.slice(0, 1)}/${subdir}-${myid}`;
                var newpath = subdir + '/' + good_files[a][1].split(/\//g).pop();

                // check if this file is a dupe
                for (var c of good_files)
                    if (c[1] == newpath)
                        newpath = null;

                if (!newpath)
                    break;

                good_files[a][1] = newpath;
                wanted_files.add(good_files[a]);
                break;
            }
        }

        function upload_filtered() {
            if (!wanted_files.size)
                return modal.alert('Good news -- turns out we already have all those.\n\nBut thank you for checking in!');

            hooks[0](Array.from(wanted_files), nil_files, bad_files, hooks.slice(1));
        }

        function upload_all() {
            hooks[0](good_files, nil_files, bad_files, hooks.slice(1));
        }

        var n_skip = good_files.length - wanted_files.size,
            msg = `you added ${good_files.length} files; ${good_files.length == n_skip ? 'all' : n_skip} of them were skipped --\neither because we already have them,\nor because there is no youtube-ID in your filenames.\n\n<code>OK</code> / <code>Enter</code> = continue uploading just the ${wanted_files.size} files we definitely need\n\n<code>Cancel</code> / <code>ESC</code> = override the filter; upload ALL the files you added`;

        if (!n_skip)
            upload_filtered();
        else
            modal.confirm(msg, upload_filtered, upload_all);
    };
}
// register the filter so it runs before up2k's own file handlers
up2k_hooks.push(function () {
    up2k.gotallfiles.unshift(up2k_namefilter);
});
// persist/restore nickname field if present
setInterval(function () {
    var o = ebi('unick');
    // skip while the field is absent or the user is typing in it
    if (!o || document.activeElement == o)
        return;

    // save on every edit ...
    o.oninput = function () {
        localStorage.setItem('unick', o.value);
    };
    // ... and restore the last saved value while idle
    o.value = localStorage.getItem('unick') || '';
}, 1000);

View File

@@ -0,0 +1,45 @@
// hooks into up2k
// example hook: filters the dropped files through a (simulated) async
// backend query before the upload confirmation dialogue is shown
function up2k_namefilter(good_files, nil_files, bad_files, hooks) {
    // is called when stuff is dropped into the browser,
    // after iterating through the directory tree and discovering all files,
    // before the upload confirmation dialogue is shown

    // good_files will successfully upload
    // nil_files are empty files and will show an alert in the final hook
    // bad_files are unreadable and cannot be uploaded
    var file_lists = [good_files, nil_files, bad_files];

    // build a list of filenames
    var filenames = [];
    for (var lst of file_lists)
        for (var ent of lst)
            filenames.push(ent[1]);

    toast.inf(5, "running database query...");

    // simulate delay while passing the list to some api for checking
    setTimeout(function () {

        // only keep webm files as an example
        var new_lists = [];
        for (var lst of file_lists) {
            var keep = [];
            new_lists.push(keep);

            for (var ent of lst)
                if (/\.webm$/.test(ent[1]))
                    keep.push(ent);
        }

        // finally, call the next hook in the chain
        [good_files, nil_files, bad_files] = new_lists;
        hooks[0](good_files, nil_files, bad_files, hooks.slice(1));

    }, 1000);
}
// register: run our filter before up2k's own handlers
up2k_hooks.push(function () {
    up2k.gotallfiles.unshift(up2k_namefilter);
});

View File

@@ -0,0 +1,23 @@
# systemd service which generates a new TLS certificate on each boot,
# that way the one-year expiry time won't cause any issues --
# just have everyone trust the ca.pem once every 10 years
#
# assumptions/placeholder values:
# * this script and copyparty runs as user "cpp"
# * copyparty repo is at ~cpp/dev/copyparty
# * CA is named partylan
# * server IPs = 10.1.2.3 and 192.168.123.1
# * server hostname = party.lan
[Unit]
Description=copyparty certificate generator
Before=copyparty.service
[Service]
User=cpp
Type=oneshot
SyslogIdentifier=cpp-cert
ExecStart=/bin/bash -c 'cd ~/dev/copyparty/contrib && ./cfssl.sh partylan 10.1.2.3,192.168.123.1,party.lan y'
[Install]
WantedBy=multi-user.target

View File

@@ -2,16 +2,26 @@
# and share '/mnt' with anonymous read+write
#
# installation:
# cp -pv copyparty.service /etc/systemd/system && systemctl enable --now copyparty
# restorecon -vr /etc/systemd/system/copyparty.service
# firewall-cmd --permanent --add-port={80,443,3923}/tcp
# wget https://github.com/9001/copyparty/releases/latest/download/copyparty-sfx.py -O /usr/local/bin/copyparty-sfx.py
# cp -pv copyparty.service /etc/systemd/system/
# restorecon -vr /etc/systemd/system/copyparty.service # on fedora/rhel
# firewall-cmd --permanent --add-port={80,443,3923}/tcp # --zone=libvirt
# firewall-cmd --reload
# systemctl daemon-reload && systemctl enable --now copyparty
#
# if it fails to start, first check this: systemctl status copyparty
# then try starting it while viewing logs: journalctl -fan 100
#
# you may want to:
# change "User=cpp" and "/home/cpp/" to another user
# remove the nft lines to only listen on port 3923
# and in the ExecStart= line:
# change '/usr/bin/python3' to another interpreter
# change '/mnt::rw' to another location or permission-set
# remove '-p 80,443,3923' to only listen on port 3923
# add '-q' to disable logging on busy servers
# add '-i 127.0.0.1' to only allow local connections
# add '-e2dsa' to enable filesystem scanning + indexing
# add '-e2ts' to enable metadata indexing
#
# with `Type=notify`, copyparty will signal systemd when it is ready to
# accept connections; correctly delaying units depending on copyparty.
@@ -19,8 +29,8 @@
# python disabling line-buffering, so messages are out-of-order:
# https://user-images.githubusercontent.com/241032/126040249-cb535cc7-c599-4931-a796-a5d9af691bad.png
#
# if you remove -q to enable logging, you may also want to remove the
# following line to enable buffering (slightly better performance):
# unless you add -q to disable logging, you may want to remove the
# following line to allow buffering (slightly better performance):
# Environment=PYTHONUNBUFFERED=x
#
# keep ExecStartPre before ExecStart, at least on rhel8
@@ -33,8 +43,23 @@ Type=notify
SyslogIdentifier=copyparty
Environment=PYTHONUNBUFFERED=x
ExecReload=/bin/kill -s USR1 $MAINPID
ExecStartPre=/bin/bash -c 'mkdir -p /run/tmpfiles.d/ && echo "x /tmp/pe-copyparty*" > /run/tmpfiles.d/copyparty.conf'
ExecStart=/usr/bin/python3 /usr/local/bin/copyparty-sfx.py -q -p 80,443,3923 -v /mnt::rw
# user to run as + where the TLS certificate is (if any)
User=cpp
Environment=XDG_CONFIG_HOME=/home/cpp/.config
# OPTIONAL: setup forwarding from ports 80 and 443 to port 3923
ExecStartPre=+/bin/bash -c 'nft -n -a list table nat | awk "/ to :3923 /{print\$NF}" | xargs -rL1 nft delete rule nat prerouting handle; true'
ExecStartPre=+nft add table ip nat
ExecStartPre=+nft -- add chain ip nat prerouting { type nat hook prerouting priority -100 \; }
ExecStartPre=+nft add rule ip nat prerouting tcp dport 80 redirect to :3923
ExecStartPre=+nft add rule ip nat prerouting tcp dport 443 redirect to :3923
# stop systemd-tmpfiles-clean.timer from deleting copyparty while it's running
ExecStartPre=+/bin/bash -c 'mkdir -p /run/tmpfiles.d/ && echo "x /tmp/pe-copyparty*" > /run/tmpfiles.d/copyparty.conf'
# copyparty settings
ExecStart=/usr/bin/python3 /usr/local/bin/copyparty-sfx.py -e2d -v /mnt::rw
[Install]
WantedBy=multi-user.target

View File

@@ -6,12 +6,17 @@
# 1) put copyparty-sfx.py and prisonparty.sh in /usr/local/bin
# 2) cp -pv prisonparty.service /etc/systemd/system && systemctl enable --now prisonparty
#
# expose additional filesystem locations to copyparty
# by listing them between the last `1000` and `--`
#
# `1000 1000` = what user to run copyparty as
#
# you may want to:
# change '/mnt::rw' to another location or permission-set
# (remember to change the '/mnt' chroot arg too)
#
# enable line-buffering for realtime logging (slight performance cost):
# inside the [Service] block, add the following line:
# unless you add -q to disable logging, you may want to remove the
# following line to allow buffering (slightly better performance):
# Environment=PYTHONUNBUFFERED=x
[Unit]
@@ -19,7 +24,14 @@ Description=copyparty file server
[Service]
SyslogIdentifier=prisonparty
WorkingDirectory=/usr/local/bin
Environment=PYTHONUNBUFFERED=x
WorkingDirectory=/var/lib/copyparty-jail
ExecReload=/bin/kill -s USR1 $MAINPID
# stop systemd-tmpfiles-clean.timer from deleting copyparty while it's running
ExecStartPre=+/bin/bash -c 'mkdir -p /run/tmpfiles.d/ && echo "x /tmp/pe-copyparty*" > /run/tmpfiles.d/copyparty.conf'
# run copyparty
ExecStart=/bin/bash /usr/local/bin/prisonparty.sh /var/lib/copyparty-jail 1000 1000 /mnt -- \
/usr/bin/python3 /usr/local/bin/copyparty-sfx.py -q -v /mnt::rw

45
contrib/webdav-cfg.bat Normal file
View File

@@ -0,0 +1,45 @@
@echo off
rem removes the 47.6 MiB filesize limit when downloading from webdav
rem + optionally allows/enables password-auth over plaintext http
rem + optionally helps disable wpad, removing the 10sec latency

rem "net session" only succeeds in an elevated shell; use it to verify admin rights
net session >nul 2>&1
if %errorlevel% neq 0 (
    echo sorry, you must run this as administrator
    pause
    exit /b
)

rem lift the WebClient download size limit and request timeout to their maximum
reg add HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\services\WebClient\Parameters /v FileSizeLimitInBytes /t REG_DWORD /d 0xffffffff /f
reg add HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\WebClient\Parameters /v FsCtlRequestTimeoutInSec /t REG_DWORD /d 0xffffffff /f

echo(
echo OK;
echo allow webdav basic-auth over plaintext http?
echo Y: login works, but the password will be visible in wireshark etc
echo N: login will NOT work unless you use https and valid certificates
choice
if %errorlevel% equ 1 (
    rem BasicAuthLevel 2 = basic-auth allowed over both http and https
    reg add HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\services\WebClient\Parameters /v BasicAuthLevel /t REG_DWORD /d 0x2 /f
    rem default is 1 (require tls)
)

echo(
echo OK;
echo do you want to disable wpad?
echo can give a HUGE speed boost depending on network settings
choice
if %errorlevel% equ 1 (
    echo(
    echo i'm about to open the [Connections] tab in [Internet Properties] for you;
    echo please click [LAN settings] and disable [Automatically detect settings]
    echo(
    pause
    control inetcpl.cpl,,4
)

rem restart the webclient service so the registry changes take effect
net stop webclient
net start webclient

echo(
echo OK; all done
pause

View File

@@ -1,80 +1,53 @@
# coding: utf-8
from __future__ import print_function, unicode_literals
import platform
import time
import sys
import os
import platform
import sys
import time
PY2 = sys.version_info[0] == 2
if PY2:
sys.dont_write_bytecode = True
unicode = unicode
try:
from typing import TYPE_CHECKING
except:
TYPE_CHECKING = False
if True:
from typing import Any, Callable
PY2 = sys.version_info < (3,)
if not PY2:
unicode: Callable[[Any], str] = str
else:
unicode = str
sys.dont_write_bytecode = True
unicode = unicode # noqa: F821 # pylint: disable=undefined-variable,self-assigning-variable
WINDOWS = False
if platform.system() == "Windows":
WINDOWS = [int(x) for x in platform.version().split(".")]
WINDOWS: Any = (
[int(x) for x in platform.version().split(".")]
if platform.system() == "Windows"
else False
)
VT100 = not WINDOWS or WINDOWS >= [10, 0, 14393]
# introduced in anniversary update
ANYWIN = WINDOWS or sys.platform in ["msys"]
ANYWIN = WINDOWS or sys.platform in ["msys", "cygwin"]
MACOS = platform.system() == "Darwin"
EXE = bool(getattr(sys, "frozen", False))
def get_unixdir():
paths = [
(os.environ.get, "XDG_CONFIG_HOME"),
(os.path.expanduser, "~/.config"),
(os.environ.get, "TMPDIR"),
(os.environ.get, "TEMP"),
(os.environ.get, "TMP"),
(unicode, "/tmp"),
]
for chk in [os.listdir, os.mkdir]:
for pf, pa in paths:
try:
p = pf(pa)
# print(chk.__name__, p, pa)
if not p or p.startswith("~"):
continue
p = os.path.normpath(p)
chk(p)
p = os.path.join(p, "copyparty")
if not os.path.isdir(p):
os.mkdir(p)
return p
except:
pass
raise Exception("could not find a writable path for config")
try:
CORES = len(os.sched_getaffinity(0))
except:
CORES = (os.cpu_count() if hasattr(os, "cpu_count") else 0) or 2
class EnvParams(object):
def __init__(self):
def __init__(self) -> None:
self.t0 = time.time()
self.mod = os.path.dirname(os.path.realpath(__file__))
if self.mod.endswith("__init__"):
self.mod = os.path.dirname(self.mod)
if sys.platform == "win32":
self.cfg = os.path.normpath(os.environ["APPDATA"] + "/copyparty")
elif sys.platform == "darwin":
self.cfg = os.path.expanduser("~/Library/Preferences/copyparty")
else:
self.cfg = get_unixdir()
self.cfg = self.cfg.replace("\\", "/")
try:
os.makedirs(self.cfg)
except:
if not os.path.isdir(self.cfg):
raise
self.mod = ""
self.cfg = ""
self.ox = getattr(sys, "oxidized", None)
E = EnvParams()

1040
copyparty/__main__.py Normal file → Executable file

File diff suppressed because it is too large Load Diff

View File

@@ -1,8 +1,8 @@
# coding: utf-8
VERSION = (1, 2, 7)
CODENAME = "ftp btw"
BUILD_DT = (2022, 4, 16)
VERSION = (1, 6, 14)
CODENAME = "cors k"
BUILD_DT = (2023, 4, 24)
S_VERSION = ".".join(map(str, VERSION))
S_BUILD_DT = "{0:04d}-{1:02d}-{2:02d}".format(*BUILD_DT)

File diff suppressed because it is too large Load Diff

View File

@@ -2,56 +2,70 @@
from __future__ import print_function, unicode_literals
import os
from ..util import fsenc, fsdec, SYMTIME
from . import path
from ..util import SYMTIME, fsdec, fsenc
from . import path as path
if True: # pylint: disable=using-constant-test
from typing import Any, Optional
_ = (path,)
__all__ = ["path"]
# grep -hRiE '(^|[^a-zA-Z_\.-])os\.' . | gsed -r 's/ /\n/g;s/\(/(\n/g' | grep -hRiE '(^|[^a-zA-Z_\.-])os\.' | sort | uniq -c
# printf 'os\.(%s)' "$(grep ^def bos/__init__.py | gsed -r 's/^def //;s/\(.*//' | tr '\n' '|' | gsed -r 's/.$//')"
def chmod(p, mode):
def chmod(p: str, mode: int) -> None:
return os.chmod(fsenc(p), mode)
def listdir(p="."):
def listdir(p: str = ".") -> list[str]:
return [fsdec(x) for x in os.listdir(fsenc(p))]
def makedirs(name, mode=0o755, exist_ok=True):
def makedirs(name: str, mode: int = 0o755, exist_ok: bool = True) -> bool:
bname = fsenc(name)
try:
os.makedirs(bname, mode)
return True
except:
if not exist_ok or not os.path.isdir(bname):
raise
return False
def mkdir(p, mode=0o755):
def mkdir(p: str, mode: int = 0o755) -> None:
return os.mkdir(fsenc(p), mode)
def rename(src, dst):
def open(p: str, *a, **ka) -> int:
return os.open(fsenc(p), *a, **ka)
def rename(src: str, dst: str) -> None:
return os.rename(fsenc(src), fsenc(dst))
def replace(src, dst):
def replace(src: str, dst: str) -> None:
return os.replace(fsenc(src), fsenc(dst))
def rmdir(p):
def rmdir(p: str) -> None:
return os.rmdir(fsenc(p))
def stat(p):
def stat(p: str) -> os.stat_result:
return os.stat(fsenc(p))
def unlink(p):
def unlink(p: str) -> None:
return os.unlink(fsenc(p))
def utime(p, times=None, follow_symlinks=True):
def utime(
p: str, times: Optional[tuple[float, float]] = None, follow_symlinks: bool = True
) -> None:
if SYMTIME:
return os.utime(fsenc(p), times, follow_symlinks=follow_symlinks)
else:
@@ -60,7 +74,7 @@ def utime(p, times=None, follow_symlinks=True):
if hasattr(os, "lstat"):
def lstat(p):
def lstat(p: str) -> os.stat_result:
return os.lstat(fsenc(p))
else:

View File

@@ -2,43 +2,44 @@
from __future__ import print_function, unicode_literals
import os
from ..util import fsenc, fsdec, SYMTIME
from ..util import SYMTIME, fsdec, fsenc
def abspath(p):
def abspath(p: str) -> str:
return fsdec(os.path.abspath(fsenc(p)))
def exists(p):
def exists(p: str) -> bool:
return os.path.exists(fsenc(p))
def getmtime(p, follow_symlinks=True):
def getmtime(p: str, follow_symlinks: bool = True) -> float:
if not follow_symlinks and SYMTIME:
return os.lstat(fsenc(p)).st_mtime
else:
return os.path.getmtime(fsenc(p))
def getsize(p):
def getsize(p: str) -> int:
return os.path.getsize(fsenc(p))
def isfile(p):
def isfile(p: str) -> bool:
return os.path.isfile(fsenc(p))
def isdir(p):
def isdir(p: str) -> bool:
return os.path.isdir(fsenc(p))
def islink(p):
def islink(p: str) -> bool:
return os.path.islink(fsenc(p))
def lexists(p):
def lexists(p: str) -> bool:
return os.path.lexists(fsenc(p))
def realpath(p):
def realpath(p: str) -> str:
return fsdec(os.path.realpath(fsenc(p)))

View File

@@ -1,52 +1,64 @@
# coding: utf-8
from __future__ import print_function, unicode_literals
import time
import threading
import time
import traceback
from .broker_util import try_exec
import queue
from .__init__ import CORES, TYPE_CHECKING
from .broker_mpw import MpWorker
from .util import mp
from .broker_util import try_exec
from .util import Daemon, mp
if TYPE_CHECKING:
from .svchub import SvcHub
if True: # pylint: disable=using-constant-test
from typing import Any
class MProcess(mp.Process):
def __init__(
self,
q_pend: queue.Queue[tuple[int, str, list[Any]]],
q_yield: queue.Queue[tuple[int, str, list[Any]]],
target: Any,
args: Any,
) -> None:
super(MProcess, self).__init__(target=target, args=args)
self.q_pend = q_pend
self.q_yield = q_yield
class BrokerMp(object):
"""external api; manages MpWorkers"""
def __init__(self, hub):
def __init__(self, hub: "SvcHub") -> None:
self.hub = hub
self.log = hub.log
self.args = hub.args
self.procs = []
self.retpend = {}
self.retpend_mutex = threading.Lock()
self.mutex = threading.Lock()
self.num_workers = self.args.j or mp.cpu_count()
self.num_workers = self.args.j or CORES
self.log("broker", "booting {} subprocesses".format(self.num_workers))
for n in range(1, self.num_workers + 1):
q_pend = mp.Queue(1)
q_yield = mp.Queue(64)
proc = mp.Process(target=MpWorker, args=(q_pend, q_yield, self.args, n))
proc.q_pend = q_pend
proc.q_yield = q_yield
proc.clients = {}
thr = threading.Thread(
target=self.collector, args=(proc,), name="mp-sink-{}".format(n)
)
thr.daemon = True
thr.start()
q_pend: queue.Queue[tuple[int, str, list[Any]]] = mp.Queue(1)
q_yield: queue.Queue[tuple[int, str, list[Any]]] = mp.Queue(64)
proc = MProcess(q_pend, q_yield, MpWorker, (q_pend, q_yield, self.args, n))
Daemon(self.collector, "mp-sink-{}".format(n), (proc,))
self.procs.append(proc)
proc.start()
def shutdown(self):
def shutdown(self) -> None:
self.log("broker", "shutting down")
for n, proc in enumerate(self.procs):
thr = threading.Thread(
target=proc.q_pend.put([0, "shutdown", []]),
target=proc.q_pend.put((0, "shutdown", [])),
name="mp-shutdown-{}-{}".format(n, len(self.procs)),
)
thr.start()
@@ -62,12 +74,12 @@ class BrokerMp(object):
procs.pop()
def reload(self):
def reload(self) -> None:
self.log("broker", "reloading")
for _, proc in enumerate(self.procs):
proc.q_pend.put([0, "reload", []])
proc.q_pend.put((0, "reload", []))
def collector(self, proc):
def collector(self, proc: MProcess) -> None:
"""receive message from hub in other process"""
while True:
msg = proc.q_yield.get()
@@ -78,24 +90,24 @@ class BrokerMp(object):
elif dest == "retq":
# response from previous ipc call
with self.retpend_mutex:
retq = self.retpend.pop(retq_id)
retq.put(args)
raise Exception("invalid broker_mp usage")
else:
# new ipc invoking managed service in hub
obj = self.hub
for node in dest.split("."):
obj = getattr(obj, node)
try:
obj = self.hub
for node in dest.split("."):
obj = getattr(obj, node)
# TODO will deadlock if dest performs another ipc
rv = try_exec(retq_id, obj, *args)
# TODO will deadlock if dest performs another ipc
rv = try_exec(retq_id, obj, *args)
except:
rv = ["exception", "stack", traceback.format_exc()]
if retq_id:
proc.q_pend.put([retq_id, "retq", rv])
proc.q_pend.put((retq_id, "retq", rv))
def put(self, want_retval, dest, *args):
def say(self, dest: str, *args: Any) -> None:
"""
send message to non-hub component in other process,
returns a Queue object which eventually contains the response if want_retval
@@ -103,7 +115,11 @@ class BrokerMp(object):
"""
if dest == "listen":
for p in self.procs:
p.q_pend.put([0, dest, [args[0], len(self.procs)]])
p.q_pend.put((0, dest, [args[0], len(self.procs)]))
elif dest == "set_netdevs":
for p in self.procs:
p.q_pend.put((0, dest, list(args)))
elif dest == "cb_httpsrv_up":
self.hub.cb_httpsrv_up()

View File

@@ -1,20 +1,38 @@
# coding: utf-8
from __future__ import print_function, unicode_literals
import sys
import argparse
import os
import signal
import sys
import threading
from .broker_util import ExceptionalQueue
import queue
from .__init__ import ANYWIN
from .authsrv import AuthSrv
from .broker_util import BrokerCli, ExceptionalQueue
from .httpsrv import HttpSrv
from .util import FAKE_MP
from copyparty.authsrv import AuthSrv
from .util import FAKE_MP, Daemon, HMaccas
if True: # pylint: disable=using-constant-test
from types import FrameType
from typing import Any, Optional, Union
class MpWorker(object):
class MpWorker(BrokerCli):
"""one single mp instance"""
def __init__(self, q_pend, q_yield, args, n):
def __init__(
self,
q_pend: queue.Queue[tuple[int, str, list[Any]]],
q_yield: queue.Queue[tuple[int, str, list[Any]]],
args: argparse.Namespace,
n: int,
) -> None:
super(MpWorker, self).__init__()
self.q_pend = q_pend
self.q_yield = q_yield
self.args = args
@@ -22,43 +40,45 @@ class MpWorker(object):
self.log = self._log_disabled if args.q and not args.lo else self._log_enabled
self.retpend = {}
self.retpend: dict[int, Any] = {}
self.retpend_mutex = threading.Lock()
self.mutex = threading.Lock()
# we inherited signal_handler from parent,
# replace it with something harmless
if not FAKE_MP:
for sig in [signal.SIGINT, signal.SIGTERM, signal.SIGUSR1]:
sigs = [signal.SIGINT, signal.SIGTERM]
if not ANYWIN:
sigs.append(signal.SIGUSR1)
for sig in sigs:
signal.signal(sig, self.signal_handler)
# starting to look like a good idea
self.asrv = AuthSrv(args, None, False)
# instantiate all services here (TODO: inheritance?)
self.iphash = HMaccas(os.path.join(self.args.E.cfg, "iphash"), 8)
self.httpsrv = HttpSrv(self, n)
# on winxp and some other platforms,
# use thr.join() to block all signals
thr = threading.Thread(target=self.main, name="mpw-main")
thr.daemon = True
thr.start()
thr.join()
Daemon(self.main, "mpw-main").join()
def signal_handler(self, sig, frame):
def signal_handler(self, sig: Optional[int], frame: Optional[FrameType]) -> None:
# print('k')
pass
def _log_enabled(self, src, msg, c=0):
self.q_yield.put([0, "log", [src, msg, c]])
def _log_enabled(self, src: str, msg: str, c: Union[int, str] = 0) -> None:
self.q_yield.put((0, "log", [src, msg, c]))
def _log_disabled(self, src, msg, c=0):
def _log_disabled(self, src: str, msg: str, c: Union[int, str] = 0) -> None:
pass
def logw(self, msg, c=0):
def logw(self, msg: str, c: Union[int, str] = 0) -> None:
self.log("mp{}".format(self.n), msg, c)
def main(self):
def main(self) -> None:
while True:
retq_id, dest, args = self.q_pend.get()
@@ -77,6 +97,9 @@ class MpWorker(object):
elif dest == "listen":
self.httpsrv.listen(args[0], args[1])
elif dest == "set_netdevs":
self.httpsrv.set_netdevs(args[0])
elif dest == "retq":
# response from previous ipc call
with self.retpend_mutex:
@@ -87,15 +110,14 @@ class MpWorker(object):
else:
raise Exception("what is " + str(dest))
def put(self, want_retval, dest, *args):
if want_retval:
retq = ExceptionalQueue(1)
retq_id = id(retq)
with self.retpend_mutex:
self.retpend[retq_id] = retq
else:
retq = None
retq_id = 0
def ask(self, dest: str, *args: Any) -> ExceptionalQueue:
retq = ExceptionalQueue(1)
retq_id = id(retq)
with self.retpend_mutex:
self.retpend[retq_id] = retq
self.q_yield.put([retq_id, dest, args])
self.q_yield.put((retq_id, dest, list(args)))
return retq
def say(self, dest: str, *args: Any) -> None:
self.q_yield.put((0, dest, list(args)))

View File

@@ -1,16 +1,27 @@
# coding: utf-8
from __future__ import print_function, unicode_literals
import os
import threading
from .__init__ import TYPE_CHECKING
from .broker_util import BrokerCli, ExceptionalQueue, try_exec
from .httpsrv import HttpSrv
from .broker_util import ExceptionalQueue, try_exec
from .util import HMaccas
if TYPE_CHECKING:
from .svchub import SvcHub
if True: # pylint: disable=using-constant-test
from typing import Any
class BrokerThr(object):
class BrokerThr(BrokerCli):
"""external api; behaves like BrokerMP but using plain threads"""
def __init__(self, hub):
def __init__(self, hub: "SvcHub") -> None:
super(BrokerThr, self).__init__()
self.hub = hub
self.log = hub.log
self.args = hub.args
@@ -20,32 +31,43 @@ class BrokerThr(object):
self.num_workers = 1
# instantiate all services here (TODO: inheritance?)
self.iphash = HMaccas(os.path.join(self.args.E.cfg, "iphash"), 8)
self.httpsrv = HttpSrv(self, None)
self.reload = self.noop
def shutdown(self):
def shutdown(self) -> None:
# self.log("broker", "shutting down")
self.httpsrv.shutdown()
def noop(self):
def noop(self) -> None:
pass
def put(self, want_retval, dest, *args):
def ask(self, dest: str, *args: Any) -> ExceptionalQueue:
# new ipc invoking managed service in hub
obj = self.hub
for node in dest.split("."):
obj = getattr(obj, node)
rv = try_exec(True, obj, *args)
# pretend we're broker_mp
retq = ExceptionalQueue(1)
retq.put(rv)
return retq
def say(self, dest: str, *args: Any) -> None:
if dest == "listen":
self.httpsrv.listen(args[0], 1)
return
else:
# new ipc invoking managed service in hub
obj = self.hub
for node in dest.split("."):
obj = getattr(obj, node)
if dest == "set_netdevs":
self.httpsrv.set_netdevs(args[0])
return
# TODO will deadlock if dest performs another ipc
rv = try_exec(want_retval, obj, *args)
if not want_retval:
return
# new ipc invoking managed service in hub
obj = self.hub
for node in dest.split("."):
obj = getattr(obj, node)
# pretend we're broker_mp
retq = ExceptionalQueue(1)
retq.put(rv)
return retq
try_exec(False, obj, *args)

View File

@@ -1,17 +1,28 @@
# coding: utf-8
from __future__ import print_function, unicode_literals
import argparse
import traceback
from .util import Pebkac, Queue
from queue import Queue
from .__init__ import TYPE_CHECKING
from .authsrv import AuthSrv
from .util import HMaccas, Pebkac
if True: # pylint: disable=using-constant-test
from typing import Any, Optional, Union
from .util import RootLogger
if TYPE_CHECKING:
from .httpsrv import HttpSrv
class ExceptionalQueue(Queue, object):
def get(self, block=True, timeout=None):
def get(self, block: bool = True, timeout: Optional[float] = None) -> Any:
rv = super(ExceptionalQueue, self).get(block, timeout)
# TODO: how expensive is this?
if isinstance(rv, list):
if rv[0] == "exception":
if rv[1] == "pebkac":
@@ -22,7 +33,29 @@ class ExceptionalQueue(Queue, object):
return rv
def try_exec(want_retval, func, *args):
class BrokerCli(object):
"""
helps mypy understand httpsrv.broker but still fails a few levels deeper,
for example resolving httpconn.* in httpcli -- see lines tagged #mypy404
"""
log: "RootLogger"
args: argparse.Namespace
asrv: AuthSrv
httpsrv: "HttpSrv"
iphash: HMaccas
def __init__(self) -> None:
pass
def ask(self, dest: str, *args: Any) -> ExceptionalQueue:
return ExceptionalQueue(1)
def say(self, dest: str, *args: Any) -> None:
pass
def try_exec(want_retval: Union[bool, int], func: Any, *args: list[Any]) -> Any:
try:
return func(*args)

150
copyparty/cfg.py Normal file
View File

@@ -0,0 +1,150 @@
# coding: utf-8
from __future__ import print_function, unicode_literals
# awk -F\" '/add_argument\("-[^-]/{print(substr($2,2))}' copyparty/__main__.py | sort | tr '\n' ' '
zs = "a c e2d e2ds e2dsa e2t e2ts e2tsr e2v e2vp e2vu ed emp i j lo mcr mte mth mtm mtp nb nc nid nih nw p q s ss sss v z zv"
# every argv switch that takes a single dash (list generated by the awk one-liner above)
onedash = set(zs.split())
def vf_bmap() -> dict[str, str]:
    """argv-to-volflag: simple bools"""
    # switches whose volflag name differs from the argv name
    mapping = {
        "never_symlink": "neversymlink",
        "no_dedup": "copydupes",
        "no_dupe": "nodupe",
        "no_forget": "noforget",
    }
    # switches that keep the same name as a volflag
    identical = (
        "dotsrch",
        "e2t",
        "e2ts",
        "e2tsr",
        "e2v",
        "e2vu",
        "e2vp",
        "hardlink",
        "magic",
        "no_sb_md",
        "no_sb_lg",
        "rand",
        "xdev",
        "xlink",
        "xvol",
    )
    mapping.update({flag: flag for flag in identical})
    return mapping
def vf_vmap() -> dict[str, str]:
    """argv-to-volflag: simple values"""
    # both switches map 1:1 onto a volflag of the same name
    return {flag: flag for flag in ("lg_sbf", "md_sbf")}
def vf_cmap() -> dict[str, str]:
    """argv-to-volflag: complex/lists"""
    # each switch maps 1:1 onto a volflag of the same name
    return {flag: flag for flag in ("dbd", "html_head", "mte", "mth", "nrand")}
# single-letter volume permissions and their human-readable descriptions
permdescs = {
    "r": "read; list folder contents, download files",
    "w": 'write; upload files; need "r" to see the uploads',
    "m": 'move; move files and folders; need "w" at destination',
    "d": "delete; permanently delete files and folders",
    "g": "get; download files, but cannot see folder contents",
    "G": 'upget; same as "g" but can see filekeys of their own uploads',
}
# volflag descriptions grouped by category;
# keys may carry an "=example" suffix which is stripped by flagdescs below
# (presumably rendered in the argparse help text -- verify against __main__.py)
flagcats = {
    "uploads, general": {
        "nodupe": "rejects existing files (instead of symlinking them)",
        "hardlink": "does dedup with hardlinks instead of symlinks",
        "neversymlink": "disables symlink fallback; full copy instead",
        "copydupes": "disables dedup, always saves full copies of dupes",
        "daw": "enable full WebDAV write support (dangerous);\nPUT-operations will now \033[1;31mOVERWRITE\033[0;35m existing files",
        "nosub": "forces all uploads into the top folder of the vfs",
        "magic": "enables filetype detection for nameless uploads",
        "gz": "allows server-side gzip of uploads with ?gz (also c,xz)",
        "pk": "forces server-side compression, optional arg: xz,9",
    },
    "upload rules": {
        "maxn=250,600": "max 250 uploads over 15min",
        "maxb=1g,300": "max 1 GiB over 5min (suffixes: b, k, m, g)",
        "rand": "force randomized filenames, 9 chars long by default",
        "nrand=N": "randomized filenames are N chars long",
        "sz=1k-3m": "allow filesizes between 1 KiB and 3MiB",
        "df=1g": "ensure 1 GiB free disk space",
    },
    "upload rotation\n(moves all uploads into the specified folder structure)": {
        "rotn=100,3": "3 levels of subfolders with 100 entries in each",
        "rotf=%Y-%m/%d-%H": "date-formatted organizing",
        "lifetime=3600": "uploads are deleted after 1 hour",
    },
    "database, general": {
        "e2d": "enable database; makes files searchable + enables upload dedup",
        "e2ds": "scan writable folders for new files on startup; also sets -e2d",
        "e2dsa": "scans all folders for new files on startup; also sets -e2d",
        "e2t": "enable multimedia indexing; makes it possible to search for tags",
        "e2ts": "scan existing files for tags on startup; also sets -e2t",
        "e2tsa": "delete all metadata from DB (full rescan); also sets -e2ts",
        "d2ts": "disables metadata collection for existing files",
        "d2ds": "disables onboot indexing, overrides -e2ds*",
        "d2t": "disables metadata collection, overrides -e2t*",
        "d2v": "disables file verification, overrides -e2v*",
        "d2d": "disables all database stuff, overrides -e2*",
        "hist=/tmp/cdb": "puts thumbnails and indexes at that location",
        "scan=60": "scan for new files every 60sec, same as --re-maxage",
        "nohash=\\.iso$": "skips hashing file contents if path matches *.iso",
        "noidx=\\.iso$": "fully ignores the contents at paths matching *.iso",
        "noforget": "don't forget files when deleted from disk",
        "dbd=[acid|swal|wal|yolo]": "database speed-durability tradeoff",
        "xlink": "cross-volume dupe detection / linking",
        "xdev": "do not descend into other filesystems",
        "xvol": "skip symlinks leaving the volume root",
        "dotsrch": "show dotfiles in search results",
        "nodotsrch": "hide dotfiles in search results (default)",
    },
    'database, audio tags\n"mte", "mth", "mtp", "mtm" all work the same as -mte, -mth, ...': {
        "mtp=.bpm=f,audio-bpm.py": 'uses the "audio-bpm.py" program to\ngenerate ".bpm" tags from uploads (f = overwrite tags)',
        "mtp=ahash,vhash=media-hash.py": "collects two tags at once",
    },
    "thumbnails": {
        "dthumb": "disables all thumbnails",
        "dvthumb": "disables video thumbnails",
        "dathumb": "disables audio thumbnails (spectrograms)",
        "dithumb": "disables image thumbnails",
    },
    "event hooks\n(better explained in --help-hooks)": {
        "xbu=CMD": "execute CMD before a file upload starts",
        "xau=CMD": "execute CMD after a file upload finishes",
        "xiu=CMD": "execute CMD after all uploads finish and volume is idle",
        "xbr=CMD": "execute CMD before a file rename/move",
        "xar=CMD": "execute CMD after a file rename/move",
        "xbd=CMD": "execute CMD before a file delete",
        "xad=CMD": "execute CMD after a file delete",
        "xm=CMD": "execute CMD on message",
    },
    "client and ux": {
        "html_head=TXT": "includes TXT in the <head>",
        "robots": "allows indexing by search engines (default)",
        "norobots": "kindly asks search engines to leave",
        "no_sb_md": "disable js sandbox for markdown files",
        "no_sb_lg": "disable js sandbox for prologue/epilogue",
        "sb_md": "enable js sandbox for markdown files (default)",
        "sb_lg": "enable js sandbox for prologue/epilogue (default)",
        "md_sbf": "list of markdown-sandbox safeguards to disable",
        "lg_sbf": "list of *logue-sandbox safeguards to disable",
    },
    "others": {
        "fk=8": 'generates per-file accesskeys,\nwhich will then be required at the "g" permission'
    },
}
flagdescs = {k.split("=")[0]: v for tab in flagcats.values() for k, v in tab.items()}

72
copyparty/dxml.py Normal file
View File

@@ -0,0 +1,72 @@
import importlib
import sys
import xml.etree.ElementTree as ET
from .__init__ import PY2
if True: # pylint: disable=using-constant-test
from typing import Any, Optional
def get_ET() -> ET.XMLParser:
    """
    returns the pure-python XMLParser class even when the C accelerator
    (_elementtree) is installed; DXMLParser below reaches into
    self.parser / self._parser to override expat handlers, which only
    works on the pure-python implementation
    """
    pn = "xml.etree.ElementTree"
    cn = "_elementtree"

    # stash the C accelerator; nothing to do if it was never imported
    cmod = sys.modules.pop(cn, None)
    if not cmod:
        return ET.XMLParser  # type: ignore

    # block the accelerator (None entry makes its import fail)
    # and reimport to obtain the pure-python module
    pmod = sys.modules.pop(pn)
    sys.modules[cn] = None  # type: ignore

    ret = importlib.import_module(pn)
    # restore sys.modules to exactly its previous state
    for name, mod in ((pn, pmod), (cn, cmod)):
        if mod:
            sys.modules[name] = mod
        else:
            sys.modules.pop(name, None)

    # re-point the package attribute at the original module
    sys.modules["xml.etree"].ElementTree = pmod  # type: ignore
    # keep exception type compatible with the regular module
    ret.ParseError = ET.ParseError  # type: ignore
    return ret.XMLParser  # type: ignore


# pure-python parser class; safe to subclass with custom expat handlers
XMLParser: ET.XMLParser = get_ET()
class DXMLParser(XMLParser):  # type: ignore
    """
    "defused" XML parser; raises BadXML on doctype declarations and on
    entity definitions/references (guards against XXE and entity-expansion
    attacks on untrusted input)
    """

    def __init__(self) -> None:
        tb = ET.TreeBuilder()
        super(DXMLParser, self).__init__(target=tb)

        # py2's XMLParser keeps the expat parser in _parser instead
        p = self._parser if PY2 else self.parser
        p.StartDoctypeDeclHandler = self.nope
        p.EntityDeclHandler = self.nope
        p.UnparsedEntityDeclHandler = self.nope
        p.ExternalEntityRefHandler = self.nope

    def nope(self, *a: Any, **ka: Any) -> None:
        # replacement handler; any use of the blocked features is fatal
        raise BadXML("{}, {}".format(a, ka))
class BadXML(Exception):
    """raised when a document tries to use blocked xml features (doctypes/entities)"""
def parse_xml(txt: str) -> ET.Element:
    """parses txt into an element tree using the hardened DXMLParser;
    raises BadXML if the document uses doctypes or entities"""
    parser = DXMLParser()
    parser.feed(txt)
    return parser.close()  # type: ignore
def mktnod(name: str, text: str) -> ET.Element:
    """builds a leaf element carrying the given text"""
    node = ET.Element(name)
    node.text = text
    return node
def mkenod(name: str, sub_el: Optional[ET.Element] = None) -> ET.Element:
    """builds an element node, optionally attaching an existing child"""
    node = ET.Element(name)
    if sub_el is not None:
        node.append(sub_el)
    return node

152
copyparty/fsutil.py Normal file
View File

@@ -0,0 +1,152 @@
# coding: utf-8
from __future__ import print_function, unicode_literals
import os
import re
import time
from .__init__ import ANYWIN, MACOS
from .authsrv import AXS, VFS
from .bos import bos
from .util import chkcmd, min_ex
if True: # pylint: disable=using-constant-test
from typing import Optional, Union
from .util import RootLogger
class Fstab(object):
    """determines and caches the filesystem type at given paths"""

    def __init__(self, log: "RootLogger"):
        self.log_func = log

        # True when tab was built from actual mount(8) output
        self.trusted = False
        # VFS tree of mountpoints; built lazily
        self.tab: Optional[VFS] = None
        # path -> lowercased filesystem name
        self.cache: dict[str, str] = {}
        self.age = 0.0

    def log(self, msg: str, c: Union[int, str] = 0) -> None:
        self.log_func("fstab", msg, c)

    def get(self, path: str) -> str:
        """
        returns the (lowercased) filesystem type at path;
        falls back to "ext4" (or "vfat" on windows) if detection fails
        """
        if len(self.cache) > 9000:
            # cap memory use; drop everything and rebuild lazily
            self.age = time.time()
            self.tab = None
            self.cache = {}

        fs = "ext4"
        msg = "failed to determine filesystem at [{}]; assuming {}\n{}"

        if ANYWIN:
            fs = "vfat"
            try:
                path = self._winpath(path)
            except:
                self.log(msg.format(path, fs, min_ex()), 3)
                return fs

        path = path.lstrip("/")
        try:
            return self.cache[path]
        except:
            pass

        try:
            fs = self.get_w32(path) if ANYWIN else self.get_unix(path)
        except:
            self.log(msg.format(path, fs, min_ex()), 3)

        fs = fs.lower()
        self.cache[path] = fs
        self.log("found {} at {}".format(fs, path))
        return fs

    def _winpath(self, path: str) -> str:
        # try to combine volume-label + st_dev (vsn)
        path = path.replace("/", "\\")
        vid = path.split(":", 1)[0].strip("\\").split("\\", 1)[0]
        try:
            return "{}*{}".format(vid, bos.stat(path).st_dev)
        except:
            return vid

    def build_fallback(self) -> None:
        # single-node tab used when mount(8) output is unavailable
        self.tab = VFS(self.log_func, "idk", "/", AXS(), {})
        self.trusted = False

    def build_tab(self) -> None:
        """parses mount(8) output into a VFS tree of mountpoints"""
        self.log("building tab")

        sptn = r"^.*? on (.*) type ([^ ]+) \(.*"
        if MACOS:
            # macOS mount prints "dev on path (fstype, flags)"
            sptn = r"^.*? on (.*) \(([^ ]+), .*"

        ptn = re.compile(sptn)
        so, _ = chkcmd(["mount"])
        tab1: list[tuple[str, str]] = []
        for ln in so.split("\n"):
            m = ptn.match(ln)
            if not m:
                continue

            zs1, zs2 = m.groups()
            tab1.append((str(zs1), str(zs2)))

        # shortest mountpoint first so parents are inserted before children
        tab1.sort(key=lambda x: (len(x[0]), x[0]))
        path1, fs1 = tab1[0]
        tab = VFS(self.log_func, fs1, path1, AXS(), {})
        for path, fs in tab1[1:]:
            tab.add(fs, path.lstrip("/"))

        self.tab = tab

    def relabel(self, path: str, nval: str) -> None:
        """sets the filesystem type at path (and below) to nval, flushing the cache"""
        assert self.tab
        self.cache = {}
        if ANYWIN:
            path = self._winpath(path)

        path = path.lstrip("/")
        ptn = re.compile(r"^[^\\/]*")
        vn, rem = self.tab._find(path)
        if not self.trusted:
            # no mtab access; have to build as we go
            if "/" in rem:
                self.tab.add("idk", os.path.join(vn.vpath, rem.split("/")[0]))
            if rem:
                self.tab.add(nval, path)
            else:
                vn.realpath = nval

            return

        # trusted tab: rewrite the fs-name prefix for the subtree
        visit = [vn]
        while visit:
            vn = visit.pop()
            vn.realpath = ptn.sub(nval, vn.realpath)
            visit.extend(list(vn.nodes.values()))

    def get_unix(self, path: str) -> str:
        if not self.tab:
            try:
                self.build_tab()
                self.trusted = True
            except:
                # prisonparty or other restrictive environment
                self.log("failed to build tab:\n{}".format(min_ex()), 3)
                self.build_fallback()

        assert self.tab
        ret = self.tab._find(path)[0]
        if self.trusted or path == ret.vpath:
            return ret.realpath.split("/")[0]
        else:
            # untrusted tab and inexact match; don't guess
            return "idk"

    def get_w32(self, path: str) -> str:
        if not self.tab:
            self.build_fallback()

        assert self.tab
        ret = self.tab._find(path)[0]
        return ret.realpath

View File

@@ -1,16 +1,33 @@
# coding: utf-8
from __future__ import print_function, unicode_literals
import os
import sys
import stat
import time
import argparse
import logging
import threading
import os
import stat
import sys
import time
from .__init__ import E, PY2
from .util import Pebkac, fsenc, exclude_dotfiles
from pyftpdlib.authorizers import AuthenticationFailed, DummyAuthorizer
from pyftpdlib.filesystems import AbstractedFS, FilesystemError
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import FTPServer
from .__init__ import ANYWIN, PY2, TYPE_CHECKING, E
from .authsrv import VFS
from .bos import bos
from .util import (
Daemon,
Pebkac,
exclude_dotfiles,
fsenc,
ipnorm,
pybin,
relchk,
runhook,
sanitize_fn,
vjoin,
)
try:
from pyftpdlib.ioloop import IOLoop
@@ -20,133 +37,202 @@ except ImportError:
sys.path.append(p)
from pyftpdlib.ioloop import IOLoop
from pyftpdlib.authorizers import DummyAuthorizer, AuthenticationFailed
from pyftpdlib.filesystems import AbstractedFS, FilesystemError
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import FTPServer
from pyftpdlib.log import config_logging
if TYPE_CHECKING:
from .svchub import SvcHub
try:
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .svchub import SvcHub
except ImportError:
pass
if True: # pylint: disable=using-constant-test
import typing
from typing import Any, Optional
class FtpAuth(DummyAuthorizer):
def __init__(self):
def __init__(self, hub: "SvcHub") -> None:
super(FtpAuth, self).__init__()
self.hub = None # type: SvcHub
self.hub = hub
def validate_authentication(
self, username: str, password: str, handler: Any
) -> None:
handler.username = "{}:{}".format(username, password)
handler.uname = "*"
ip = handler.addr[0]
if ip.startswith("::ffff:"):
ip = ip[7:]
ip = ipnorm(ip)
bans = self.hub.bans
if ip in bans:
rt = bans[ip] - time.time()
if rt < 0:
logging.info("client unbanned")
del bans[ip]
else:
raise AuthenticationFailed("banned")
def validate_authentication(self, username, password, handler):
asrv = self.hub.asrv
if username == "anonymous":
password = ""
uname = "*"
else:
uname = asrv.iacct.get(password, "") or asrv.iacct.get(username, "") or "*"
uname = "*"
if password:
uname = asrv.iacct.get(password, None)
if not uname or not (asrv.vfs.aread.get(uname) or asrv.vfs.awrite.get(uname)):
g = self.hub.gpwd
if g.lim:
bonk, ip = g.bonk(ip, handler.username)
if bonk:
logging.warning("client banned: invalid passwords")
bans[ip] = bonk
handler.username = uname
if password and not uname:
raise AuthenticationFailed("Authentication failed.")
def get_home_dir(self, username):
handler.uname = uname
def get_home_dir(self, username: str) -> str:
return "/"
def has_user(self, username):
def has_user(self, username: str) -> bool:
asrv = self.hub.asrv
return username in asrv.acct
return username in asrv.acct or username in asrv.iacct
def has_perm(self, username, perm, path=None):
def has_perm(self, username: str, perm: int, path: Optional[str] = None) -> bool:
return True # handled at filesystem layer
def get_perms(self, username):
def get_perms(self, username: str) -> str:
return "elradfmwMT"
def get_msg_login(self, username):
def get_msg_login(self, username: str) -> str:
return "sup {}".format(username)
def get_msg_quit(self, username):
def get_msg_quit(self, username: str) -> str:
return "cya"
class FtpFs(AbstractedFS):
def __init__(self, root, cmd_channel):
def __init__(
self, root: str, cmd_channel: Any
) -> None: # pylint: disable=super-init-not-called
self.h = self.cmd_channel = cmd_channel # type: FTPHandler
self.hub = cmd_channel.hub # type: SvcHub
self.hub: "SvcHub" = cmd_channel.hub
self.args = cmd_channel.args
self.uname = self.hub.asrv.iacct.get(cmd_channel.password, "*")
self.uname = cmd_channel.uname
self.cwd = "/" # pyftpdlib convention of leading slash
self.root = "/var/lib/empty"
self.can_read = self.can_write = self.can_move = False
self.can_delete = self.can_get = self.can_upget = False
self.listdirinfo = self.listdir
self.chdir(".")
def v2a(self, vpath, r=False, w=False, m=False, d=False):
def die(self, msg):
self.h.die(msg)
raise Exception()
def v2a(
self,
vpath: str,
r: bool = False,
w: bool = False,
m: bool = False,
d: bool = False,
) -> tuple[str, VFS, str]:
try:
vpath = vpath.replace("\\", "/").lstrip("/")
rd, fn = os.path.split(vpath)
if ANYWIN and relchk(rd):
logging.warning("malicious vpath: %s", vpath)
self.die("Unsupported characters in filepath")
fn = sanitize_fn(fn or "", "", [".prologue.html", ".epilogue.html"])
vpath = vjoin(rd, fn)
vfs, rem = self.hub.asrv.vfs.get(vpath, self.uname, r, w, m, d)
if not vfs.realpath:
raise FilesystemError("no filesystem mounted at this path")
self.die("No filesystem mounted at this path")
return os.path.join(vfs.realpath, rem)
return os.path.join(vfs.realpath, rem), vfs, rem
except Pebkac as ex:
raise FilesystemError(str(ex))
self.die(str(ex))
def rv2a(self, vpath, r=False, w=False, m=False, d=False):
def rv2a(
self,
vpath: str,
r: bool = False,
w: bool = False,
m: bool = False,
d: bool = False,
) -> tuple[str, VFS, str]:
return self.v2a(os.path.join(self.cwd, vpath), r, w, m, d)
def ftp2fs(self, ftppath):
def ftp2fs(self, ftppath: str) -> str:
# return self.v2a(ftppath)
return ftppath # self.cwd must be vpath
def fs2ftp(self, fspath):
def fs2ftp(self, fspath: str) -> str:
# raise NotImplementedError()
return fspath
def validpath(self, path):
def validpath(self, path: str) -> bool:
if "/.hist/" in path:
if "/up2k." in path or path.endswith("/dir.txt"):
raise FilesystemError("access to this file is forbidden")
self.die("Access to this file is forbidden")
return True
def open(self, filename, mode):
def open(self, filename: str, mode: str) -> typing.IO[Any]:
r = "r" in mode
w = "w" in mode or "a" in mode or "+" in mode
ap = self.rv2a(filename, r, w)
if w and bos.path.exists(ap):
raise FilesystemError("cannot open existing file for writing")
ap = self.rv2a(filename, r, w)[0]
if w:
try:
st = bos.stat(ap)
td = time.time() - st.st_mtime
except:
td = 0
if td < -1 or td > self.args.ftp_wt:
self.die("Cannot open existing file for writing")
self.validpath(ap)
return open(fsenc(ap), mode)
def chdir(self, path):
self.cwd = join(self.cwd, path)
x = self.hub.asrv.vfs.can_access(self.cwd.lstrip("/"), self.h.username)
self.can_read, self.can_write, self.can_move, self.can_delete, self.can_get = x
def chdir(self, path: str) -> None:
nwd = join(self.cwd, path)
vfs, rem = self.hub.asrv.vfs.get(nwd, self.uname, False, False)
ap = vfs.canonical(rem)
if not bos.path.isdir(ap):
# returning 550 is library-default and suitable
self.die("Failed to change directory")
def mkdir(self, path):
ap = self.rv2a(path, w=True)
bos.mkdir(ap)
self.cwd = nwd
(
self.can_read,
self.can_write,
self.can_move,
self.can_delete,
self.can_get,
self.can_upget,
) = self.hub.asrv.vfs.can_access(self.cwd.lstrip("/"), self.h.uname)
def listdir(self, path):
def mkdir(self, path: str) -> None:
ap = self.rv2a(path, w=True)[0]
bos.makedirs(ap) # filezilla expects this
def listdir(self, path: str) -> list[str]:
vpath = join(self.cwd, path).lstrip("/")
try:
vfs, rem = self.hub.asrv.vfs.get(vpath, self.uname, True, False)
fsroot, vfs_ls, vfs_virt = vfs.ls(
rem, self.uname, not self.args.no_scandir, [[True], [False, True]]
fsroot, vfs_ls1, vfs_virt = vfs.ls(
rem,
self.uname,
not self.args.no_scandir,
[[True, False], [False, True]],
)
vfs_ls = [x[0] for x in vfs_ls]
vfs_ls = [x[0] for x in vfs_ls1]
vfs_ls.extend(vfs_virt.keys())
if not self.args.ed:
@@ -154,7 +240,7 @@ class FtpFs(AbstractedFS):
vfs_ls.sort()
return vfs_ls
except Exception as ex:
except:
if vpath:
# display write-only folders as empty
return []
@@ -163,138 +249,178 @@ class FtpFs(AbstractedFS):
r = {x.split("/")[0]: 1 for x in self.hub.asrv.vfs.all_vols.keys()}
return list(sorted(list(r.keys())))
def rmdir(self, path):
ap = self.rv2a(path, d=True)
def rmdir(self, path: str) -> None:
ap = self.rv2a(path, d=True)[0]
bos.rmdir(ap)
def remove(self, path):
def remove(self, path: str) -> None:
if self.args.no_del:
raise FilesystemError("the delete feature is disabled in server config")
self.die("The delete feature is disabled in server config")
vp = join(self.cwd, path).lstrip("/")
x = self.hub.broker.put(
True, "up2k.handle_rm", self.uname, self.h.remote_ip, [vp]
)
try:
x.get()
self.hub.up2k.handle_rm(self.uname, self.h.cli_ip, [vp], [])
except Exception as ex:
raise FilesystemError(str(ex))
self.die(str(ex))
def rename(self, src, dst):
def rename(self, src: str, dst: str) -> None:
if not self.can_move:
raise FilesystemError("not allowed for user " + self.h.username)
self.die("Not allowed for user " + self.h.uname)
if self.args.no_mv:
m = "the rename/move feature is disabled in server config"
raise FilesystemError(m)
self.die("The rename/move feature is disabled in server config")
svp = join(self.cwd, src).lstrip("/")
dvp = join(self.cwd, dst).lstrip("/")
x = self.hub.broker.put(True, "up2k.handle_mv", self.uname, svp, dvp)
try:
x.get()
self.hub.up2k.handle_mv(self.uname, svp, dvp)
except Exception as ex:
raise FilesystemError(str(ex))
self.die(str(ex))
def chmod(self, path, mode):
def chmod(self, path: str, mode: str) -> None:
pass
def stat(self, path):
def stat(self, path: str) -> os.stat_result:
try:
ap = self.rv2a(path, r=True)
ap = self.rv2a(path, r=True)[0]
return bos.stat(ap)
except:
ap = self.rv2a(path)
ap = self.rv2a(path)[0]
st = bos.stat(ap)
if not stat.S_ISDIR(st.st_mode):
raise
return st
def utime(self, path, timeval):
ap = self.rv2a(path, w=True)
def utime(self, path: str, timeval: float) -> None:
ap = self.rv2a(path, w=True)[0]
return bos.utime(ap, (timeval, timeval))
def lstat(self, path):
ap = self.rv2a(path)
return bos.lstat(ap)
def lstat(self, path: str) -> os.stat_result:
ap = self.rv2a(path)[0]
return bos.stat(ap)
def isfile(self, path):
st = self.stat(path)
return stat.S_ISREG(st.st_mode)
def isfile(self, path: str) -> bool:
try:
st = self.stat(path)
return stat.S_ISREG(st.st_mode)
except:
return False # expected for mojibake in ftp_SIZE()
def islink(self, path):
ap = self.rv2a(path)
def islink(self, path: str) -> bool:
ap = self.rv2a(path)[0]
return bos.path.islink(ap)
def isdir(self, path):
def isdir(self, path: str) -> bool:
try:
st = self.stat(path)
return stat.S_ISDIR(st.st_mode)
except:
return True
def getsize(self, path):
ap = self.rv2a(path)
def getsize(self, path: str) -> int:
ap = self.rv2a(path)[0]
return bos.path.getsize(ap)
def getmtime(self, path):
ap = self.rv2a(path)
def getmtime(self, path: str) -> float:
ap = self.rv2a(path)[0]
return bos.path.getmtime(ap)
def realpath(self, path):
def realpath(self, path: str) -> str:
return path
def lexists(self, path):
ap = self.rv2a(path)
def lexists(self, path: str) -> bool:
ap = self.rv2a(path)[0]
return bos.path.lexists(ap)
def get_user_by_uid(self, uid):
def get_user_by_uid(self, uid: int) -> str:
return "root"
def get_group_by_uid(self, gid):
def get_group_by_uid(self, gid: int) -> str:
return "root"
class FtpHandler(FTPHandler):
abstracted_fs = FtpFs
hub: "SvcHub"
args: argparse.Namespace
uname: str
def __init__(self, conn: Any, server: Any, ioloop: Any = None) -> None:
self.hub: "SvcHub" = FtpHandler.hub
self.args: argparse.Namespace = FtpHandler.args
self.uname = "*"
def __init__(self, conn, server, ioloop=None):
if PY2:
FTPHandler.__init__(self, conn, server, ioloop)
else:
super(FtpHandler, self).__init__(conn, server, ioloop)
# abspath->vpath mapping to resolve log_transfer paths
self.vfs_map = {}
cip = self.remote_ip
self.cli_ip = cip[7:] if cip.startswith("::ffff:") else cip
def ftp_STOR(self, file, mode="w"):
# abspath->vpath mapping to resolve log_transfer paths
self.vfs_map: dict[str, str] = {}
# reduce non-debug logging
self.log_cmds_list = [x for x in self.log_cmds_list if x not in ("CWD", "XCWD")]
def die(self, msg):
self.respond("550 {}".format(msg))
raise FilesystemError(msg)
def ftp_STOR(self, file: str, mode: str = "w") -> Any:
# Optional[str]
vp = join(self.fs.cwd, file).lstrip("/")
ap = self.fs.v2a(vp)
ap, vfs, rem = self.fs.v2a(vp)
self.vfs_map[ap] = vp
xbu = vfs.flags.get("xbu")
if xbu and not runhook(
None,
xbu,
ap,
vfs.canonical(rem),
"",
self.uname,
0,
0,
self.cli_ip,
0,
"",
):
self.die("Upload blocked by xbu server config")
# print("ftp_STOR: {} {} => {}".format(vp, mode, ap))
ret = FTPHandler.ftp_STOR(self, file, mode)
# print("ftp_STOR: {} {} OK".format(vp, mode))
return ret
def log_transfer(self, cmd, filename, receive, completed, elapsed, bytes):
def log_transfer(
self,
cmd: str,
filename: bytes,
receive: bool,
completed: bool,
elapsed: float,
bytes: int,
) -> Any:
# None
ap = filename.decode("utf-8", "replace")
vp = self.vfs_map.pop(ap, None)
# print("xfer_end: {} => {}".format(ap, vp))
if vp:
vp, fn = os.path.split(vp)
vfs, rem = self.hub.asrv.vfs.get(vp, self.username, False, True)
vfs, rem = self.hub.asrv.vfs.get(vp, self.uname, False, True)
vfs, rem = vfs.get_dbv(rem)
self.hub.broker.put(
False,
"up2k.hash_file",
self.hub.up2k.hash_file(
vfs.realpath,
vfs.vpath,
vfs.flags,
rem,
fn,
self.remote_ip,
self.cli_ip,
time.time(),
self.uname,
)
return FTPHandler.log_transfer(
@@ -313,7 +439,7 @@ except:
class Ftpd(object):
def __init__(self, hub):
def __init__(self, hub: "SvcHub") -> None:
self.hub = hub
self.args = hub.args
@@ -322,24 +448,23 @@ class Ftpd(object):
hs.append([FtpHandler, self.args.ftp])
if self.args.ftps:
try:
h = SftpHandler
h1 = SftpHandler
except:
m = "\nftps requires pyopenssl;\nplease run the following:\n\n {} -m pip install --user pyopenssl\n"
print(m.format(sys.executable))
t = "\nftps requires pyopenssl;\nplease run the following:\n\n {} -m pip install --user pyopenssl\n"
print(t.format(pybin))
sys.exit(1)
h.certfile = os.path.join(E.cfg, "cert.pem")
h.tls_control_required = True
h.tls_data_required = True
h1.certfile = self.args.cert
h1.tls_control_required = True
h1.tls_data_required = True
hs.append([h, self.args.ftps])
hs.append([h1, self.args.ftps])
for h in hs:
h, lp = h
h.hub = hub
h.args = hub.args
h.authorizer = FtpAuth()
h.authorizer.hub = hub
for h_lp in hs:
h2, lp = h_lp
h2.hub = hub
h2.args = hub.args
h2.authorizer = FtpAuth(hub)
if self.args.ftp_pr:
p1, p2 = [int(x) for x in self.args.ftp_pr.split("-")]
@@ -351,24 +476,30 @@ class Ftpd(object):
else:
p1 += d + 1
h.passive_ports = list(range(p1, p2 + 1))
h2.passive_ports = list(range(p1, p2 + 1))
if self.args.ftp_nat:
h.masquerade_address = self.args.ftp_nat
h2.masquerade_address = self.args.ftp_nat
if self.args.ftp_dbg:
config_logging(level=logging.DEBUG)
lgr = logging.getLogger("pyftpdlib")
lgr.setLevel(logging.DEBUG if self.args.ftpv else logging.INFO)
ips = self.args.i
if "::" in ips:
ips.append("0.0.0.0")
ioloop = IOLoop()
for ip in self.args.i:
for ip in ips:
for h, lp in hs:
FTPServer((ip, int(lp)), h, ioloop)
try:
FTPServer((ip, int(lp)), h, ioloop)
except:
if ip != "0.0.0.0" or "::" not in ips:
raise
t = threading.Thread(target=ioloop.loop)
t.daemon = True
t.start()
Daemon(ioloop.loop, "ftp")
def join(p1, p2):
def join(p1: str, p2: str) -> str:
w = os.path.join(p1, p2.replace("\\", "/"))
return os.path.normpath(w).replace("\\", "/")

File diff suppressed because it is too large Load Diff

View File

@@ -1,24 +1,38 @@
# coding: utf-8
from __future__ import print_function, unicode_literals
import re
import argparse # typechk
import os
import time
import re
import socket
import threading # typechk
import time
HAVE_SSL = True
try:
HAVE_SSL = True
import ssl
except:
HAVE_SSL = False
from .__init__ import E
from .util import Unrecv
from . import util as Util
from .__init__ import TYPE_CHECKING, EnvParams
from .authsrv import AuthSrv # typechk
from .httpcli import HttpCli
from .u2idx import U2idx
from .ico import Ico
from .mtag import HAVE_FFMPEG
from .th_cli import ThumbCli
from .th_srv import HAVE_PIL, HAVE_VIPS
from .ico import Ico
from .u2idx import U2idx
from .util import HMaccas, shut_socket
if True: # pylint: disable=using-constant-test
from typing import Optional, Pattern, Union
if TYPE_CHECKING:
from .httpsrv import HttpSrv
PTN_HTTP = re.compile(br"[A-Z]{3}[A-Z ]")
class HttpConn(object):
@@ -27,39 +41,50 @@ class HttpConn(object):
creates an HttpCli for each request (Connection: Keep-Alive)
"""
def __init__(self, sck, addr, hsrv):
def __init__(
self, sck: socket.socket, addr: tuple[str, int], hsrv: "HttpSrv"
) -> None:
self.s = sck
self.sr: Optional[Util._Unrecv] = None
self.cli: Optional[HttpCli] = None
self.addr = addr
self.hsrv = hsrv
self.mutex = hsrv.mutex
self.args = hsrv.args
self.asrv = hsrv.asrv
self.mutex: threading.Lock = hsrv.mutex # mypy404
self.args: argparse.Namespace = hsrv.args # mypy404
self.E: EnvParams = self.args.E
self.asrv: AuthSrv = hsrv.asrv # mypy404
self.cert_path = hsrv.cert_path
self.u2fh = hsrv.u2fh
self.u2fh: Util.FHC = hsrv.u2fh # mypy404
self.iphash: HMaccas = hsrv.broker.iphash
self.bans: dict[str, int] = hsrv.bans
self.aclose: dict[str, int] = hsrv.aclose
enth = (HAVE_PIL or HAVE_VIPS) and not self.args.no_thumb
self.thumbcli = ThumbCli(hsrv) if enth else None
self.ico = Ico(self.args)
enth = (HAVE_PIL or HAVE_VIPS or HAVE_FFMPEG) and not self.args.no_thumb
self.thumbcli: Optional[ThumbCli] = ThumbCli(hsrv) if enth else None # mypy404
self.ico: Ico = Ico(self.args) # mypy404
self.t0 = time.time()
self.t0: float = time.time() # mypy404
self.freshen_pwd: float = 0.0
self.stopping = False
self.nreq = 0
self.nbyte = 0
self.u2idx = None
self.log_func = hsrv.log
self.lf_url = re.compile(self.args.lf_url) if self.args.lf_url else None
self.nreq: int = -1 # mypy404
self.nbyte: int = 0 # mypy404
self.u2idx: Optional[U2idx] = None
self.log_func: "Util.RootLogger" = hsrv.log # mypy404
self.log_src: str = "httpconn" # mypy404
self.lf_url: Optional[Pattern[str]] = (
re.compile(self.args.lf_url) if self.args.lf_url else None
) # mypy404
self.set_rproxy()
def shutdown(self):
def shutdown(self) -> None:
self.stopping = True
try:
self.s.shutdown(socket.SHUT_RDWR)
self.s.close()
shut_socket(self.log, self.s, 1)
except:
pass
def set_rproxy(self, ip=None):
def set_rproxy(self, ip: Optional[str] = None) -> str:
if ip is None:
color = 36
ip = self.addr[0]
@@ -72,35 +97,38 @@ class HttpConn(object):
self.log_src = "{} \033[{}m{}".format(ip, color, self.addr[1]).ljust(26)
return self.log_src
def respath(self, res_name):
return os.path.join(E.mod, "web", res_name)
def respath(self, res_name: str) -> str:
return os.path.join(self.E.mod, "web", res_name)
def log(self, msg, c=0):
def log(self, msg: str, c: Union[int, str] = 0) -> None:
self.log_func(self.log_src, msg, c)
def get_u2idx(self):
def get_u2idx(self) -> Optional[U2idx]:
# grab from a pool of u2idx instances;
# sqlite3 fully parallelizes under python threads
# but avoid running out of FDs by creating too many
if not self.u2idx:
self.u2idx = U2idx(self)
self.u2idx = self.hsrv.get_u2idx(str(self.addr))
return self.u2idx
def _detect_https(self):
def _detect_https(self) -> bool:
method = None
if self.cert_path:
try:
method = self.s.recv(4, socket.MSG_PEEK)
except socket.timeout:
return
return False
except AttributeError:
# jython does not support msg_peek; forget about https
method = self.s.recv(4)
self.sr = Unrecv(self.s)
self.sr = Util.Unrecv(self.s, self.log)
self.sr.buf = method
# jython used to do this, they stopped since it's broken
# but reimplementing sendall is out of scope for now
if not getattr(self.s, "sendall", None):
self.s.sendall = self.s.send
self.s.sendall = self.s.send # type: ignore
if len(method) != 4:
err = "need at least 4 bytes in the first packet; got {}".format(
@@ -110,17 +138,20 @@ class HttpConn(object):
self.log(err)
self.s.send(b"HTTP/1.1 400 Bad Request\r\n\r\n" + err.encode("utf-8"))
return
return False
return method not in [None, b"GET ", b"HEAD", b"POST", b"PUT ", b"OPTI"]
return not method or not bool(PTN_HTTP.match(method))
def run(self) -> None:
self.s.settimeout(10)
def run(self):
self.sr = None
if self.args.https_only:
is_https = True
elif self.args.http_only or not HAVE_SSL:
is_https = False
else:
# raise Exception("asdf")
is_https = self._detect_https()
if is_https:
@@ -149,14 +180,15 @@ class HttpConn(object):
self.s = ctx.wrap_socket(self.s, server_side=True)
msg = [
"\033[1;3{:d}m{}".format(c, s)
for c, s in zip([0, 5, 0], self.s.cipher())
for c, s in zip([0, 5, 0], self.s.cipher()) # type: ignore
]
self.log(" ".join(msg) + "\033[0m")
if self.args.ssl_dbg and hasattr(self.s, "shared_ciphers"):
overlap = [y[::-1] for y in self.s.shared_ciphers()]
lines = [str(x) for x in (["TLS cipher overlap:"] + overlap)]
self.log("\n".join(lines))
ciphers = self.s.shared_ciphers()
assert ciphers
overlap = [str(y[::-1]) for y in ciphers]
self.log("TLS cipher overlap:" + "\n".join(overlap))
for k, v in [
["compression", self.s.compression()],
["ALPN proto", self.s.selected_alpn_protocol()],
@@ -167,11 +199,7 @@ class HttpConn(object):
except Exception as ex:
em = str(ex)
if "ALERT_BAD_CERTIFICATE" in em:
# firefox-linux if there is no exception yet
self.log("client rejected our certificate (nice)")
elif "ALERT_CERTIFICATE_UNKNOWN" in em:
if "ALERT_CERTIFICATE_UNKNOWN" in em:
# android-chrome keeps doing this
pass
@@ -181,10 +209,14 @@ class HttpConn(object):
return
if not self.sr:
self.sr = Unrecv(self.s)
self.sr = Util.Unrecv(self.s, self.log)
while not self.stopping:
self.nreq += 1
cli = HttpCli(self)
if not cli.run():
self.cli = HttpCli(self)
if not self.cli.run():
return
if self.u2idx:
self.hsrv.put_u2idx(str(self.addr), self.u2idx)
self.u2idx = None

View File

@@ -1,17 +1,29 @@
# coding: utf-8
from __future__ import print_function, unicode_literals
import os
import sys
import time
import math
import base64
import math
import os
import socket
import sys
import threading
import time
import queue
from .__init__ import ANYWIN, CORES, EXE, MACOS, TYPE_CHECKING, EnvParams
try:
MNFE = ModuleNotFoundError
except:
MNFE = ImportError
try:
import jinja2
except ImportError:
except MNFE:
if EXE:
raise
print(
"""\033[1;31m
you do not have jinja2 installed,\033[33m
@@ -26,15 +38,31 @@ except ImportError:
)
sys.exit(1)
from .__init__ import E, PY2, MACOS
from .util import FHC, spack, min_ex, start_stackmon, start_log_thrs
from .bos import bos
from .httpconn import HttpConn
from .u2idx import U2idx
from .util import (
E_SCK,
FHC,
Daemon,
Garda,
Magician,
Netdev,
NetMap,
ipnorm,
min_ex,
shut_socket,
spack,
start_log_thrs,
start_stackmon,
)
if PY2:
import Queue as queue
else:
import queue
if TYPE_CHECKING:
from .broker_util import BrokerCli
from .ssdp import SSDPr
if True: # pylint: disable=using-constant-test
from typing import Any, Optional
class HttpSrv(object):
@@ -43,52 +71,72 @@ class HttpSrv(object):
relying on MpSrv for performance (HttpSrv is just plain threads)
"""
def __init__(self, broker, nid):
def __init__(self, broker: "BrokerCli", nid: Optional[int]) -> None:
self.broker = broker
self.nid = nid
self.args = broker.args
self.E: EnvParams = self.args.E
self.log = broker.log
self.asrv = broker.asrv
nsuf = "-n{}-i{:x}".format(nid, os.getpid()) if nid else ""
# redefine in case of multiprocessing
socket.setdefaulttimeout(120)
nsuf = "-n{}-i{:x}".format(nid, os.getpid()) if nid else ""
self.magician = Magician()
self.nm = NetMap([], {})
self.ssdp: Optional["SSDPr"] = None
self.gpwd = Garda(self.args.ban_pw)
self.g404 = Garda(self.args.ban_404)
self.bans: dict[str, int] = {}
self.aclose: dict[str, int] = {}
self.bound: set[tuple[str, int]] = set()
self.name = "hsrv" + nsuf
self.mutex = threading.Lock()
self.stopping = False
self.tp_nthr = 0 # actual
self.tp_ncli = 0 # fading
self.tp_time = None # latest worker collect
self.tp_q = None if self.args.no_htp else queue.LifoQueue()
self.t_periodic = None
self.tp_time = 0.0 # latest worker collect
self.tp_q: Optional[queue.LifoQueue[Any]] = (
None if self.args.no_htp else queue.LifoQueue()
)
self.t_periodic: Optional[threading.Thread] = None
self.u2fh = FHC()
self.srvs = []
self.srvs: list[socket.socket] = []
self.ncli = 0 # exact
self.clients = {} # laggy
self.clients: set[HttpConn] = set() # laggy
self.nclimax = 0
self.cb_ts = 0
self.cb_v = 0
self.cb_ts = 0.0
self.cb_v = ""
try:
x = self.broker.put(True, "thumbsrv.getcfg")
self.th_cfg = x.get()
except:
pass
self.u2idx_free: dict[str, U2idx] = {}
self.u2idx_n = 0
env = jinja2.Environment()
env.loader = jinja2.FileSystemLoader(os.path.join(E.mod, "web"))
self.j2 = {
x: env.get_template(x + ".html")
for x in ["splash", "browser", "browser2", "msg", "md", "mde"]
}
self.prism = os.path.exists(os.path.join(E.mod, "web", "deps", "prism.js.gz"))
env.loader = jinja2.FileSystemLoader(os.path.join(self.E.mod, "web"))
jn = ["splash", "svcs", "browser", "browser2", "msg", "md", "mde", "cf"]
self.j2 = {x: env.get_template(x + ".html") for x in jn}
zs = os.path.join(self.E.mod, "web", "deps", "prism.js.gz")
self.prism = os.path.exists(zs)
cert_path = os.path.join(E.cfg, "cert.pem")
self.mallow = "GET HEAD POST PUT DELETE OPTIONS".split()
if not self.args.no_dav:
zs = "PROPFIND PROPPATCH LOCK UNLOCK MKCOL COPY MOVE"
self.mallow += zs.split()
if self.args.zs:
from .ssdp import SSDPr
self.ssdp = SSDPr(broker)
cert_path = self.args.cert
if bos.path.exists(cert_path):
self.cert_path = cert_path
else:
self.cert_path = None
self.cert_path = ""
if self.tp_q:
self.start_threads(4)
@@ -100,28 +148,41 @@ class HttpSrv(object):
if self.args.log_thrs:
start_log_thrs(self.log, self.args.log_thrs, nid)
def start_threads(self, n):
self.th_cfg: dict[str, Any] = {}
Daemon(self.post_init, "hsrv-init2")
def post_init(self) -> None:
try:
x = self.broker.ask("thumbsrv.getcfg")
self.th_cfg = x.get()
except:
pass
def set_netdevs(self, netdevs: dict[str, Netdev]) -> None:
ips = set()
for ip, _ in self.bound:
ips.add(ip)
self.nm = NetMap(list(ips), netdevs)
def start_threads(self, n: int) -> None:
self.tp_nthr += n
if self.args.log_htp:
self.log(self.name, "workers += {} = {}".format(n, self.tp_nthr), 6)
for _ in range(n):
thr = threading.Thread(
target=self.thr_poolw,
name=self.name + "-poolw",
)
thr.daemon = True
thr.start()
Daemon(self.thr_poolw, self.name + "-poolw")
def stop_threads(self, n):
def stop_threads(self, n: int) -> None:
self.tp_nthr -= n
if self.args.log_htp:
self.log(self.name, "workers -= {} = {}".format(n, self.tp_nthr), 6)
assert self.tp_q
for _ in range(n):
self.tp_q.put(None)
def periodic(self):
def periodic(self) -> None:
while True:
time.sleep(2 if self.tp_ncli or self.ncli else 10)
with self.mutex:
@@ -135,65 +196,134 @@ class HttpSrv(object):
self.t_periodic = None
return
def listen(self, sck, nlisteners):
ip, port = sck.getsockname()
self.srvs.append(sck)
self.nclimax = math.ceil(self.args.nc * 1.0 / nlisteners)
t = threading.Thread(
target=self.thr_listen,
args=(sck,),
name="httpsrv-n{}-listen-{}-{}".format(self.nid or "0", ip, port),
)
t.daemon = True
t.start()
def listen(self, sck: socket.socket, nlisteners: int) -> None:
if self.args.j != 1:
# lost in the pickle; redefine
if not ANYWIN or self.args.reuseaddr:
sck.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
def thr_listen(self, srv_sck):
sck.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
sck.settimeout(None) # < does not inherit, ^ opts above do
ip, port = sck.getsockname()[:2]
self.srvs.append(sck)
self.bound.add((ip, port))
self.nclimax = math.ceil(self.args.nc * 1.0 / nlisteners)
Daemon(
self.thr_listen,
"httpsrv-n{}-listen-{}-{}".format(self.nid or "0", ip, port),
(sck,),
)
def thr_listen(self, srv_sck: socket.socket) -> None:
"""listens on a shared tcp server"""
ip, port = srv_sck.getsockname()
ip, port = srv_sck.getsockname()[:2]
fno = srv_sck.fileno()
msg = "subscribed @ {}:{} f{}".format(ip, port, fno)
hip = "[{}]".format(ip) if ":" in ip else ip
msg = "subscribed @ {}:{} f{} p{}".format(hip, port, fno, os.getpid())
self.log(self.name, msg)
def fun():
self.broker.put(False, "cb_httpsrv_up")
def fun() -> None:
self.broker.say("cb_httpsrv_up")
threading.Thread(target=fun).start()
threading.Thread(target=fun, name="sig-hsrv-up1").start()
while not self.stopping:
if self.args.log_conn:
self.log(self.name, "|%sC-ncli" % ("-" * 1,), c="1;30")
self.log(self.name, "|%sC-ncli" % ("-" * 1,), c="90")
if self.ncli >= self.nclimax:
self.log(self.name, "at connection limit; waiting", 3)
while self.ncli >= self.nclimax:
time.sleep(0.1)
spins = 0
while self.ncli >= self.nclimax:
if not spins:
self.log(self.name, "at connection limit; waiting", 3)
spins += 1
time.sleep(0.1)
if spins != 50 or not self.args.aclose:
continue
ipfreq: dict[str, int] = {}
with self.mutex:
for c in self.clients:
ip = ipnorm(c.ip)
try:
ipfreq[ip] += 1
except:
ipfreq[ip] = 1
ip, n = sorted(ipfreq.items(), key=lambda x: x[1], reverse=True)[0]
if n < self.nclimax / 2:
continue
self.aclose[ip] = int(time.time() + self.args.aclose * 60)
nclose = 0
nloris = 0
nconn = 0
with self.mutex:
for c in self.clients:
cip = ipnorm(c.ip)
if ip != cip:
continue
nconn += 1
try:
if (
c.nreq >= 1
or not c.cli
or c.cli.in_hdr_recv
or c.cli.keepalive
):
Daemon(c.shutdown)
nclose += 1
if c.nreq <= 0 and (not c.cli or c.cli.in_hdr_recv):
nloris += 1
except:
pass
t = "{} downgraded to connection:close for {} min; dropped {}/{} connections"
self.log(self.name, t.format(ip, self.args.aclose, nclose, nconn), 1)
if nloris < nconn / 2:
continue
t = "slowloris (idle-conn): {} banned for {} min"
self.log(self.name, t.format(ip, self.args.loris, nclose), 1)
self.bans[ip] = int(time.time() + self.args.loris * 60)
if self.args.log_conn:
self.log(self.name, "|%sC-acc1" % ("-" * 2,), c="1;30")
self.log(self.name, "|%sC-acc1" % ("-" * 2,), c="90")
try:
sck, addr = srv_sck.accept()
sck, saddr = srv_sck.accept()
cip, cport = saddr[:2]
if cip.startswith("::ffff:"):
cip = cip[7:]
addr = (cip, cport)
except (OSError, socket.error) as ex:
if self.stopping:
break
self.log(self.name, "accept({}): {}".format(fno, ex), c=6)
time.sleep(0.02)
continue
if self.args.log_conn:
m = "|{}C-acc2 \033[0;36m{} \033[3{}m{}".format(
t = "|{}C-acc2 \033[0;36m{} \033[3{}m{}".format(
"-" * 3, ip, port % 8, port
)
self.log("%s %s" % addr, m, c="1;30")
self.log("%s %s" % addr, t, c="90")
self.accept(sck, addr)
def accept(self, sck, addr):
def accept(self, sck: socket.socket, addr: tuple[str, int]) -> None:
"""takes an incoming tcp connection and creates a thread to handle it"""
now = time.time()
if now - (self.tp_time or now) > 300:
m = "httpserver threadpool died: tpt {:.2f}, now {:.2f}, nthr {}, ncli {}"
self.log(self.name, m.format(self.tp_time, now, self.tp_nthr, self.ncli), 1)
self.tp_time = None
t = "httpserver threadpool died: tpt {:.2f}, now {:.2f}, nthr {}, ncli {}"
self.log(self.name, t.format(self.tp_time, now, self.tp_nthr, self.ncli), 1)
self.tp_time = 0
self.tp_q = None
with self.mutex:
@@ -203,10 +333,7 @@ class HttpSrv(object):
if self.nid:
name += "-{}".format(self.nid)
t = threading.Thread(target=self.periodic, name=name)
self.t_periodic = t
t.daemon = True
t.start()
self.t_periodic = Daemon(self.periodic, name)
if self.tp_q:
self.tp_time = self.tp_time or now
@@ -218,25 +345,24 @@ class HttpSrv(object):
return
if not self.args.no_htp:
m = "looks like the httpserver threadpool died; please make an issue on github and tell me the story of how you pulled that off, thanks and dog bless\n"
self.log(self.name, m, 1)
t = "looks like the httpserver threadpool died; please make an issue on github and tell me the story of how you pulled that off, thanks and dog bless\n"
self.log(self.name, t, 1)
thr = threading.Thread(
target=self.thr_client,
args=(sck, addr),
name="httpconn-{}-{}".format(addr[0].split(".", 2)[-1][-6:], addr[1]),
Daemon(
self.thr_client,
"httpconn-{}-{}".format(addr[0].split(".", 2)[-1][-6:], addr[1]),
(sck, addr),
)
thr.daemon = True
thr.start()
def thr_poolw(self):
def thr_poolw(self) -> None:
assert self.tp_q
while True:
task = self.tp_q.get()
if not task:
break
with self.mutex:
self.tp_time = None
self.tp_time = 0
try:
sck, addr = task
@@ -246,10 +372,13 @@ class HttpSrv(object):
)
self.thr_client(sck, addr)
me.name = self.name + "-poolw"
except:
self.log(self.name, "thr_client: " + min_ex(), 3)
except Exception as ex:
if str(ex).startswith("client d/c "):
self.log(self.name, "thr_client: " + str(ex), 6)
else:
self.log(self.name, "thr_client: " + min_ex(), 3)
def shutdown(self):
def shutdown(self) -> None:
self.stopping = True
for srv in self.srvs:
try:
@@ -257,12 +386,12 @@ class HttpSrv(object):
except:
pass
clients = list(self.clients.keys())
thrs = []
clients = list(self.clients)
for cli in clients:
try:
cli.shutdown()
except:
pass
t = threading.Thread(target=cli.shutdown)
thrs.append(t)
t.start()
if self.tp_q:
self.stop_threads(self.tp_nthr)
@@ -271,25 +400,27 @@ class HttpSrv(object):
if self.tp_q.empty():
break
for t in thrs:
t.join()
self.log(self.name, "ok bye")
def thr_client(self, sck, addr):
def thr_client(self, sck: socket.socket, addr: tuple[str, int]) -> None:
"""thread managing one tcp client"""
sck.settimeout(120)
cli = HttpConn(sck, addr, self)
with self.mutex:
self.clients[cli] = 0
self.clients.add(cli)
# print("{}\n".format(len(self.clients)), end="")
fno = sck.fileno()
try:
if self.args.log_conn:
self.log("%s %s" % addr, "|%sC-crun" % ("-" * 4,), c="1;30")
self.log("%s %s" % addr, "|%sC-crun" % ("-" * 4,), c="90")
cli.run()
except (OSError, socket.error) as ex:
if ex.errno not in [10038, 10054, 107, 57, 49, 9]:
if ex.errno not in E_SCK:
self.log(
"%s %s" % addr,
"run({}): {}".format(fno, ex),
@@ -299,33 +430,29 @@ class HttpSrv(object):
finally:
sck = cli.s
if self.args.log_conn:
self.log("%s %s" % addr, "|%sC-cdone" % ("-" * 5,), c="1;30")
self.log("%s %s" % addr, "|%sC-cdone" % ("-" * 5,), c="90")
try:
fno = sck.fileno()
sck.shutdown(socket.SHUT_RDWR)
sck.close()
shut_socket(cli.log, sck)
except (OSError, socket.error) as ex:
if not MACOS:
self.log(
"%s %s" % addr,
"shut({}): {}".format(fno, ex),
c="1;30",
c="90",
)
if ex.errno not in [10038, 10054, 107, 57, 49, 9]:
# 10038 No longer considered a socket
# 10054 Foribly closed by remote
# 107 Transport endpoint not connected
# 57 Socket is not connected
# 49 Can't assign requested address (wifi down)
# 9 Bad file descriptor
if ex.errno not in E_SCK:
raise
finally:
with self.mutex:
del self.clients[cli]
self.clients.remove(cli)
self.ncli -= 1
def cachebuster(self):
if cli.u2idx:
self.put_u2idx(str(addr), cli.u2idx)
def cachebuster(self) -> str:
if time.time() - self.cb_ts < 1:
return self.cb_v
@@ -333,9 +460,9 @@ class HttpSrv(object):
if time.time() - self.cb_ts < 1:
return self.cb_v
v = E.t0
v = self.E.t0
try:
with os.scandir(os.path.join(E.mod, "web")) as dh:
with os.scandir(os.path.join(self.E.mod, "web")) as dh:
for fh in dh:
inf = fh.stat()
v = max(v, inf.st_mtime)
@@ -346,3 +473,31 @@ class HttpSrv(object):
self.cb_v = v.decode("ascii")[-4:]
self.cb_ts = time.time()
return self.cb_v
def get_u2idx(self, ident: str) -> Optional[U2idx]:
utab = self.u2idx_free
for _ in range(100): # 5/0.05 = 5sec
with self.mutex:
if utab:
if ident in utab:
return utab.pop(ident)
return utab.pop(list(utab.keys())[0])
if self.u2idx_n < CORES:
self.u2idx_n += 1
return U2idx(self)
time.sleep(0.05)
# not using conditional waits, on a hunch that
# average performance will be faster like this
# since most servers won't be fully saturated
return None
def put_u2idx(self, ident: str, u2idx: U2idx) -> None:
with self.mutex:
while ident in self.u2idx_free:
ident += "a"
self.u2idx_free[ident] = u2idx

View File

@@ -1,33 +1,69 @@
# coding: utf-8
from __future__ import print_function, unicode_literals
import hashlib
import argparse # typechk
import colorsys
import hashlib
from .__init__ import PY2
from .th_srv import HAVE_PIL
from .util import BytesIO
class Ico(object):
def __init__(self, args):
def __init__(self, args: argparse.Namespace) -> None:
self.args = args
def get(self, ext, as_thumb):
def get(self, ext: str, as_thumb: bool, chrome: bool) -> tuple[str, bytes]:
"""placeholder to make thumbnails not break"""
h = hashlib.md5(ext.encode("utf-8")).digest()[:2]
zb = hashlib.sha1(ext.encode("utf-8")).digest()[2:4]
if PY2:
h = [ord(x) for x in h]
zb = [ord(x) for x in zb]
c1 = colorsys.hsv_to_rgb(h[0] / 256.0, 1, 0.3)
c2 = colorsys.hsv_to_rgb(h[0] / 256.0, 1, 1)
c = list(c1) + list(c2)
c = [int(x * 255) for x in c]
c = "".join(["{:02x}".format(x) for x in c])
c1 = colorsys.hsv_to_rgb(zb[0] / 256.0, 1, 0.3)
c2 = colorsys.hsv_to_rgb(zb[0] / 256.0, 1, 1)
ci = [int(x * 255) for x in list(c1) + list(c2)]
c = "".join(["{:02x}".format(x) for x in ci])
w = 100
h = 30
if not self.args.th_no_crop and as_thumb:
w, h = self.args.th_size.split("x")
h = int(100 / (float(w) / float(h)))
sw, sh = self.args.th_size.split("x")
h = int(100 / (float(sw) / float(sh)))
w = 100
if chrome and as_thumb:
# cannot handle more than ~2000 unique SVGs
if HAVE_PIL:
# svg: 3s, cache: 6s, this: 8s
from PIL import Image, ImageDraw
h = int(64 * h / w)
w = 64
img = Image.new("RGB", (w, h), "#" + c[:6])
pb = ImageDraw.Draw(img)
tw, th = pb.textsize(ext)
pb.text(((w - tw) // 2, (h - th) // 2), ext, fill="#" + c[6:])
img = img.resize((w * 3, h * 3), Image.NEAREST)
buf = BytesIO()
img.save(buf, format="PNG", compress_level=1)
return "image/png", buf.getvalue()
elif False:
# 48s, too slow
import pyvips
h = int(192 * h / w)
w = 192
img = pyvips.Image.text(
ext, width=w, height=h, dpi=192, align=pyvips.Align.CENTRE
)
img = img.ifthenelse(ci[3:], ci[:3], blend=True)
# i = i.resize(3, kernel=pyvips.Kernel.NEAREST)
buf = img.write_to_buffer(".png[compression=1]")
return "image/png", buf
svg = """\
<?xml version="1.0" encoding="UTF-8"?>
@@ -37,6 +73,6 @@ class Ico(object):
fill="#{}" font-family="monospace" font-size="14px" style="letter-spacing:.5px">{}</text>
</g></svg>
"""
svg = svg.format(h, c[:6], c[6:], ext).encode("utf-8")
svg = svg.format(h, c[:6], c[6:], ext)
return ["image/svg+xml", svg]
return "image/svg+xml", svg.encode("utf-8")

538
copyparty/mdns.py Normal file
View File

@@ -0,0 +1,538 @@
# coding: utf-8
from __future__ import print_function, unicode_literals
import random
import select
import socket
import time
from ipaddress import IPv4Network, IPv6Network
from .__init__ import TYPE_CHECKING
from .__init__ import unicode as U
from .multicast import MC_Sck, MCast
from .stolen.dnslib import AAAA
from .stolen.dnslib import CLASS as DC
from .stolen.dnslib import (
NSEC,
PTR,
QTYPE,
RR,
SRV,
TXT,
A,
DNSHeader,
DNSQuestion,
DNSRecord,
)
from .util import CachedSet, Daemon, Netdev, list_ips, min_ex
if TYPE_CHECKING:
from .svchub import SvcHub
if True: # pylint: disable=using-constant-test
from typing import Any, Optional, Union
MDNS4 = "224.0.0.251"
MDNS6 = "ff02::fb"
class MDNS_Sck(MC_Sck):
def __init__(
self,
sck: socket.socket,
nd: Netdev,
grp: str,
ip: str,
net: Union[IPv4Network, IPv6Network],
):
super(MDNS_Sck, self).__init__(sck, nd, grp, ip, net)
self.bp_probe = b""
self.bp_ip = b""
self.bp_svc = b""
self.bp_bye = b""
self.last_tx = 0.0
self.tx_ex = False
class MDNS(MCast):
def __init__(self, hub: "SvcHub", ngen: int) -> None:
al = hub.args
grp4 = "" if al.zm6 else MDNS4
grp6 = "" if al.zm4 else MDNS6
super(MDNS, self).__init__(
hub, MDNS_Sck, al.zm_on, al.zm_off, grp4, grp6, 5353, hub.args.zmv
)
self.srv: dict[socket.socket, MDNS_Sck] = {}
self.logsrc = "mDNS-{}".format(ngen)
self.ngen = ngen
self.ttl = 300
zs = self.args.name + ".local."
zs = zs.encode("ascii", "replace").decode("ascii", "replace")
self.hn = "-".join(x for x in zs.split("?") if x) or (
"vault-{}".format(random.randint(1, 255))
)
self.lhn = self.hn.lower()
# requester ip -> (response deadline, srv, body):
self.q: dict[str, tuple[float, MDNS_Sck, bytes]] = {}
self.rx4 = CachedSet(0.42) # 3 probes @ 250..500..750 => 500ms span
self.rx6 = CachedSet(0.42)
self.svcs, self.sfqdns = self.build_svcs()
self.lsvcs = {k.lower(): v for k, v in self.svcs.items()}
self.lsfqdns = set([x.lower() for x in self.sfqdns])
self.probing = 0.0
self.unsolicited: list[float] = [] # scheduled announces on all nics
self.defend: dict[MDNS_Sck, float] = {} # server -> deadline
def log(self, msg: str, c: Union[int, str] = 0) -> None:
self.log_func(self.logsrc, msg, c)
def build_svcs(self) -> tuple[dict[str, dict[str, Any]], set[str]]:
zms = self.args.zms
http = {"port": 80 if 80 in self.args.p else self.args.p[0]}
https = {"port": 443 if 443 in self.args.p else self.args.p[0]}
webdav = http.copy()
webdavs = https.copy()
webdav["u"] = webdavs["u"] = "u" # KDE requires username
ftp = {"port": (self.args.ftp if "f" in zms else self.args.ftps)}
smb = {"port": self.args.smb_port}
# some gvfs require path
zs = self.args.zm_ld or "/"
if zs:
webdav["path"] = zs
webdavs["path"] = zs
if self.args.zm_lh:
http["path"] = self.args.zm_lh
https["path"] = self.args.zm_lh
if self.args.zm_lf:
ftp["path"] = self.args.zm_lf
if self.args.zm_ls:
smb["path"] = self.args.zm_ls
svcs: dict[str, dict[str, Any]] = {}
if "d" in zms:
svcs["_webdav._tcp.local."] = webdav
if "D" in zms:
svcs["_webdavs._tcp.local."] = webdavs
if "h" in zms:
svcs["_http._tcp.local."] = http
if "H" in zms:
svcs["_https._tcp.local."] = https
if "f" in zms.lower():
svcs["_ftp._tcp.local."] = ftp
if "s" in zms.lower():
svcs["_smb._tcp.local."] = smb
sfqdns: set[str] = set()
for k, v in svcs.items():
name = "{}-c-{}".format(self.args.name, k.split(".")[0][1:])
v["name"] = name
sfqdns.add("{}.{}".format(name, k))
return svcs, sfqdns
def build_replies(self) -> None:
for srv in self.srv.values():
probe = DNSRecord(DNSHeader(0, 0), q=DNSQuestion(self.hn, QTYPE.ANY))
areply = DNSRecord(DNSHeader(0, 0x8400))
sreply = DNSRecord(DNSHeader(0, 0x8400))
bye = DNSRecord(DNSHeader(0, 0x8400))
have4 = have6 = False
for s2 in self.srv.values():
if srv.idx != s2.idx:
continue
if s2.v6:
have6 = True
else:
have4 = True
for ip in srv.ips:
if ":" in ip:
qt = QTYPE.AAAA
ar = {"rclass": DC.F_IN, "rdata": AAAA(ip)}
else:
qt = QTYPE.A
ar = {"rclass": DC.F_IN, "rdata": A(ip)}
r0 = RR(self.hn, qt, ttl=0, **ar)
r120 = RR(self.hn, qt, ttl=120, **ar)
# rfc-10:
# SHOULD rr ttl 120sec for A/AAAA/SRV
# (and recommend 75min for all others)
probe.add_auth(r120)
areply.add_answer(r120)
sreply.add_answer(r120)
bye.add_answer(r0)
for sclass, props in self.svcs.items():
sname = props["name"]
sport = props["port"]
sfqdn = sname + "." + sclass
k = "_services._dns-sd._udp.local."
r = RR(k, QTYPE.PTR, DC.IN, 4500, PTR(sclass))
sreply.add_answer(r)
r = RR(sclass, QTYPE.PTR, DC.IN, 4500, PTR(sfqdn))
sreply.add_answer(r)
r = RR(sfqdn, QTYPE.SRV, DC.F_IN, 120, SRV(0, 0, sport, self.hn))
sreply.add_answer(r)
areply.add_answer(r)
r = RR(sfqdn, QTYPE.SRV, DC.F_IN, 0, SRV(0, 0, sport, self.hn))
bye.add_answer(r)
txts = []
for k in ("u", "path"):
if k not in props:
continue
zb = "{}={}".format(k, props[k]).encode("utf-8")
if len(zb) > 255:
t = "value too long for mdns: [{}]"
raise Exception(t.format(props[k]))
txts.append(zb)
# gvfs really wants txt even if they're empty
r = RR(sfqdn, QTYPE.TXT, DC.F_IN, 4500, TXT(txts))
sreply.add_answer(r)
if not (have4 and have6) and not self.args.zm_noneg:
ns = NSEC(self.hn, ["AAAA" if have6 else "A"])
r = RR(self.hn, QTYPE.NSEC, DC.F_IN, 120, ns)
areply.add_ar(r)
if len(sreply.pack()) < 1400:
sreply.add_ar(r)
srv.bp_probe = probe.pack()
srv.bp_ip = areply.pack()
srv.bp_svc = sreply.pack()
srv.bp_bye = bye.pack()
# since all replies are small enough to fit in one packet,
# always send full replies rather than just a/aaaa records
srv.bp_ip = srv.bp_svc
def send_probes(self) -> None:
slp = random.random() * 0.25
for _ in range(3):
time.sleep(slp)
slp = 0.25
if not self.running:
break
if self.args.zmv:
self.log("sending hostname probe...")
# ipv4: need to probe each ip (each server)
# ipv6: only need to probe each set of looped nics
probed6: set[str] = set()
for srv in self.srv.values():
if srv.ip in probed6:
continue
try:
srv.sck.sendto(srv.bp_probe, (srv.grp, 5353))
if srv.v6:
for ip in srv.ips:
probed6.add(ip)
except Exception as ex:
self.log("sendto failed: {} ({})".format(srv.ip, ex), "90")
def run(self) -> None:
try:
bound = self.create_servers()
except:
t = "no server IP matches the mdns config\n{}"
self.log(t.format(min_ex()), 1)
bound = []
if not bound:
self.log("failed to announce copyparty services on the network", 3)
return
self.build_replies()
Daemon(self.send_probes)
zf = time.time() + 2
self.probing = zf # cant unicast so give everyone an extra sec
self.unsolicited = [zf, zf + 1, zf + 3, zf + 7] # rfc-8.3
last_hop = time.time()
ihop = self.args.mc_hop
while self.running:
timeout = (
0.02 + random.random() * 0.07
if self.probing or self.q or self.defend or self.unsolicited
else (last_hop + ihop if ihop else 180)
)
rdy = select.select(self.srv, [], [], timeout)
rx: list[socket.socket] = rdy[0] # type: ignore
self.rx4.cln()
self.rx6.cln()
buf = b""
addr = ("0", 0)
for sck in rx:
try:
buf, addr = sck.recvfrom(4096)
self.eat(buf, addr, sck)
except:
if not self.running:
self.log("stopped", 2)
return
t = "{} {} \033[33m|{}| {}\n{}".format(
self.srv[sck].name, addr, len(buf), repr(buf)[2:-1], min_ex()
)
self.log(t, 6)
if not self.probing:
self.process()
continue
if self.probing < time.time():
t = "probe ok; announcing [{}]"
self.log(t.format(self.hn[:-1]), 2)
self.probing = 0
self.log("stopped", 2)
def stop(self, panic=False) -> None:
self.running = False
for srv in self.srv.values():
try:
if panic:
srv.sck.close()
else:
srv.sck.sendto(srv.bp_bye, (srv.grp, 5353))
except:
pass
self.srv = {}
def eat(self, buf: bytes, addr: tuple[str, int], sck: socket.socket) -> None:
cip = addr[0]
v6 = ":" in cip
if (cip.startswith("169.254") and not self.ll_ok) or (
v6 and not cip.startswith("fe80")
):
return
cache = self.rx6 if v6 else self.rx4
if buf in cache.c:
return
srv: Optional[MDNS_Sck] = self.srv[sck] if v6 else self.map_client(cip) # type: ignore
if not srv:
return
cache.add(buf)
now = time.time()
if self.args.zmv and cip != srv.ip and cip not in srv.ips:
t = "{} [{}] \033[36m{} \033[0m|{}|"
self.log(t.format(srv.name, srv.ip, cip, len(buf)), "90")
p = DNSRecord.parse(buf)
if self.args.zmvv:
self.log(str(p))
# check for incoming probes for our hostname
cips = [U(x.rdata) for x in p.auth if U(x.rname).lower() == self.lhn]
if cips and self.sips.isdisjoint(cips):
if not [x for x in cips if x not in ("::1", "127.0.0.1")]:
# avahi broadcasting 127.0.0.1-only packets
return
self.log("someone trying to steal our hostname: {}".format(cips), 3)
# immediately unicast
if not self.probing:
srv.sck.sendto(srv.bp_ip, (cip, 5353))
# and schedule multicast
self.defend[srv] = self.defend.get(srv, now + 0.1)
return
# check for someone rejecting our probe / hijacking our hostname
cips = [
U(x.rdata)
for x in p.rr
if U(x.rname).lower() == self.lhn and x.rclass == DC.F_IN
]
if cips and self.sips.isdisjoint(cips):
if not [x for x in cips if x not in ("::1", "127.0.0.1")]:
# avahi broadcasting 127.0.0.1-only packets
return
# check if we've been given additional IPs
for ip in list_ips():
if ip in cips:
self.sips.add(ip)
if not self.sips.isdisjoint(cips):
return
t = "mdns zeroconf: "
if self.probing:
t += "Cannot start; hostname '{}' is occupied"
else:
t += "Emergency stop; hostname '{}' got stolen"
t += " on {}! Use --name to set another hostname.\n\nName taken by {}\n\nYour IPs: {}\n"
self.log(t.format(self.args.name, srv.name, cips, list(self.sips)), 1)
self.stop(True)
return
# then rfc-6.7; dns pretending to be mdns (android...)
if p.header.id or addr[1] != 5353:
rsp: Optional[DNSRecord] = None
for r in p.questions:
try:
lhn = U(r.qname).lower()
except:
self.log("invalid question: {}".format(r))
continue
if lhn != self.lhn:
continue
if p.header.id and r.qtype in (QTYPE.A, QTYPE.AAAA):
rsp = rsp or DNSRecord(DNSHeader(p.header.id, 0x8400))
rsp.add_question(r)
for ip in srv.ips:
qt = r.qtype
v6 = ":" in ip
if v6 == (qt == QTYPE.AAAA):
rd = AAAA(ip) if v6 else A(ip)
rr = RR(self.hn, qt, DC.IN, 10, rd)
rsp.add_answer(rr)
if rsp:
srv.sck.sendto(rsp.pack(), addr[:2])
# but don't return in case it's a differently broken client
# then a/aaaa records
for r in p.questions:
try:
lhn = U(r.qname).lower()
except:
self.log("invalid question: {}".format(r))
continue
if lhn != self.lhn:
continue
# gvfs keeps repeating itself
found = False
unicast = False
for rr in p.rr:
try:
rname = U(rr.rname).lower()
except:
self.log("invalid rr: {}".format(rr))
continue
if rname == self.lhn:
if rr.ttl > 60:
found = True
if rr.rclass == DC.F_IN:
unicast = True
if unicast:
# spec-compliant mDNS-over-unicast
srv.sck.sendto(srv.bp_ip, (cip, 5353))
elif addr[1] != 5353:
# just in case some clients use (and want us to use) invalid ports
srv.sck.sendto(srv.bp_ip, addr[:2])
if not found:
self.q[cip] = (0, srv, srv.bp_ip)
return
deadline = now + (0.5 if p.header.tc else 0.02) # rfc-7.2
# and service queries
for r in p.questions:
if not r or not r.qname:
continue
qname = U(r.qname).lower()
if qname in self.lsvcs or qname == "_services._dns-sd._udp.local.":
self.q[cip] = (deadline, srv, srv.bp_svc)
break
# heed rfc-7.1 if there was an announce in the past 12sec
# (workaround gvfs race-condition where it occasionally
# doesn't read/decode the full response...)
if now < srv.last_tx + 12:
for rr in p.rr:
if not rr.rdata:
continue
rdata = U(rr.rdata).lower()
if rdata in self.lsfqdns:
if rr.ttl > 2250:
self.q.pop(cip, None)
break
def process(self) -> None:
tx = set()
now = time.time()
cooldown = 0.9 # rfc-6: 1
if self.unsolicited and self.unsolicited[0] < now:
self.unsolicited.pop(0)
cooldown = 0.1
for srv in self.srv.values():
tx.add(srv)
for srv, deadline in list(self.defend.items()):
if now < deadline:
continue
if self._tx(srv, srv.bp_ip, 0.02): # rfc-6: 0.25
self.defend.pop(srv)
for cip, (deadline, srv, msg) in list(self.q.items()):
if now < deadline:
continue
self.q.pop(cip)
self._tx(srv, msg, cooldown)
for srv in tx:
self._tx(srv, srv.bp_svc, cooldown)
def _tx(self, srv: MDNS_Sck, msg: bytes, cooldown: float) -> bool:
now = time.time()
if now < srv.last_tx + cooldown:
return False
try:
srv.sck.sendto(msg, (srv.grp, 5353))
srv.last_tx = now
except Exception as ex:
if srv.tx_ex:
return True
srv.tx_ex = True
t = "tx({},|{}|,{}): {}"
self.log(t.format(srv.ip, len(msg), cooldown, ex), 3)
return True

View File

@@ -1,28 +1,44 @@
# coding: utf-8
from __future__ import print_function, unicode_literals
import os
import sys
import argparse
import json
import os
import shutil
import subprocess as sp
import sys
from .__init__ import PY2, WINDOWS, unicode
from .util import fsenc, fsdec, uncyg, runcmd, REKOBO_LKEY
from .__init__ import EXE, PY2, WINDOWS, E, unicode
from .bos import bos
from .util import (
FFMPEG_URL,
REKOBO_LKEY,
fsenc,
min_ex,
pybin,
retchk,
runcmd,
sfsenc,
uncyg,
)
if True: # pylint: disable=using-constant-test
from typing import Any, Union
from .util import RootLogger
def have_ff(cmd):
def have_ff(scmd: str) -> bool:
if PY2:
print("# checking {}".format(cmd))
cmd = (cmd + " -version").encode("ascii").split(b" ")
print("# checking {}".format(scmd))
acmd = (scmd + " -version").encode("ascii").split(b" ")
try:
sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE).communicate()
sp.Popen(acmd, stdout=sp.PIPE, stderr=sp.PIPE).communicate()
return True
except:
return False
else:
return bool(shutil.which(cmd))
return bool(shutil.which(scmd))
HAVE_FFMPEG = have_ff("ffmpeg")
@@ -30,13 +46,16 @@ HAVE_FFPROBE = have_ff("ffprobe")
class MParser(object):
def __init__(self, cmdline):
def __init__(self, cmdline: str) -> None:
self.tag, args = cmdline.split("=", 1)
self.tags = self.tag.split(",")
self.timeout = 30
self.timeout = 60
self.force = False
self.kill = "t" # tree; all children recursively
self.capture = 3 # outputs to consume
self.audio = "y"
self.pri = 0 # priority; higher = later
self.ext = []
while True:
@@ -58,6 +77,14 @@ class MParser(object):
self.audio = arg[1:] # [r]equire [n]ot [d]ontcare
continue
if arg.startswith("k"):
self.kill = arg[1:] # [t]ree [m]ain [n]one
continue
if arg.startswith("c"):
self.capture = int(arg[1:]) # 0=none 1=stdout 2=stderr 3=both
continue
if arg == "f":
self.force = True
continue
@@ -70,10 +97,16 @@ class MParser(object):
self.ext.append(arg[1:])
continue
if arg.startswith("p"):
self.pri = int(arg[1:] or "1")
continue
raise Exception()
def ffprobe(abspath, timeout=10):
def ffprobe(
abspath: str, timeout: int = 60
) -> tuple[dict[str, tuple[int, Any]], dict[str, list[Any]]]:
cmd = [
b"ffprobe",
b"-hide_banner",
@@ -82,19 +115,20 @@ def ffprobe(abspath, timeout=10):
b"--",
fsenc(abspath),
]
rc = runcmd(cmd, timeout=timeout)
return parse_ffprobe(rc[1])
rc, so, se = runcmd(cmd, timeout=timeout)
retchk(rc, cmd, se)
return parse_ffprobe(so)
def parse_ffprobe(txt):
def parse_ffprobe(txt: str) -> tuple[dict[str, tuple[int, Any]], dict[str, list[Any]]]:
"""ffprobe -show_format -show_streams"""
streams = []
fmt = {}
g = None
g = {}
for ln in [x.rstrip("\r") for x in txt.split("\n")]:
try:
k, v = ln.split("=", 1)
g[k] = v
sk, sv = ln.split("=", 1)
g[sk] = sv
continue
except:
pass
@@ -108,8 +142,8 @@ def parse_ffprobe(txt):
fmt = g
streams = [fmt] + streams
ret = {} # processed
md = {} # raw tags
ret: dict[str, Any] = {} # processed
md: dict[str, list[Any]] = {} # raw tags
is_audio = fmt.get("format_name") in ["mp3", "ogg", "flac", "wav"]
if fmt.get("filename", "").split(".")[-1].lower() in ["m4a", "aac"]:
@@ -157,52 +191,55 @@ def parse_ffprobe(txt):
]
if typ == "format":
kvm = [["duration", ".dur"], ["bit_rate", ".q"]]
kvm = [["duration", ".dur"], ["bit_rate", ".q"], ["format_name", "fmt"]]
for sk, rk in kvm:
v = strm.get(sk)
if v is None:
v1 = strm.get(sk)
if v1 is None:
continue
if rk.startswith("."):
try:
v = float(v)
zf = float(v1)
v2 = ret.get(rk)
if v2 is None or v > v2:
ret[rk] = v
if v2 is None or zf > v2:
ret[rk] = zf
except:
# sqlite doesnt care but the code below does
if v not in ["N/A"]:
ret[rk] = v
if v1 not in ["N/A"]:
ret[rk] = v1
else:
ret[rk] = v
ret[rk] = v1
if ret.get("vc") == "ansi": # shellscript
return {}, {}
for strm in streams:
for k, v in strm.items():
if not k.startswith("TAG:"):
for sk, sv in strm.items():
if not sk.startswith("TAG:"):
continue
k = k[4:].strip()
v = v.strip()
if k and v and k not in md:
md[k] = [v]
sk = sk[4:].strip()
sv = sv.strip()
if sk and sv and sk not in md:
md[sk] = [sv]
for k in [".q", ".vq", ".aq"]:
if k in ret:
ret[k] /= 1000 # bit_rate=320000
for sk in [".q", ".vq", ".aq"]:
if sk in ret:
ret[sk] /= 1000 # bit_rate=320000
for k in [".q", ".vq", ".aq", ".resw", ".resh"]:
if k in ret:
ret[k] = int(ret[k])
for sk in [".q", ".vq", ".aq", ".resw", ".resh"]:
if sk in ret:
ret[sk] = int(ret[sk])
if ".fps" in ret:
fps = ret[".fps"]
if "/" in fps:
fa, fb = fps.split("/")
fps = int(fa) * 1.0 / int(fb)
try:
fps = int(fa) * 1.0 / int(fb)
except:
fps = 9001
if fps < 1000 and fmt.get("format_name") not in ["image2", "png_pipe"]:
ret[".fps"] = round(fps, 3)
@@ -215,33 +252,34 @@ def parse_ffprobe(txt):
if ".q" in ret:
del ret[".q"]
if "fmt" in ret:
ret["fmt"] = ret["fmt"].split(",")[0]
if ".resw" in ret and ".resh" in ret:
ret["res"] = "{}x{}".format(ret[".resw"], ret[".resh"])
ret = {k: [0, v] for k, v in ret.items()}
zd = {k: (0, v) for k, v in ret.items()}
return ret, md
return zd, md
class MTag(object):
def __init__(self, log_func, args):
def __init__(self, log_func: "RootLogger", args: argparse.Namespace) -> None:
self.log_func = log_func
self.args = args
self.usable = True
self.prefer_mt = not args.no_mtag_ff
self.backend = "ffprobe" if args.no_mutagen else "mutagen"
self.can_ffprobe = (
HAVE_FFPROBE
and not args.no_mtag_ff
and (not WINDOWS or sys.version_info >= (3, 8))
self.backend = (
"ffprobe" if args.no_mutagen or (HAVE_FFPROBE and EXE) else "mutagen"
)
self.can_ffprobe = HAVE_FFPROBE and not args.no_mtag_ff
mappings = args.mtm
or_ffprobe = " or FFprobe"
if self.backend == "mutagen":
self.get = self.get_mutagen
try:
import mutagen
from mutagen import version # noqa: F401
except:
self.log("could not load Mutagen, trying FFprobe instead", c=3)
self.backend = "ffprobe"
@@ -258,15 +296,15 @@ class MTag(object):
msg = "found FFprobe but it was disabled by --no-mtag-ff"
self.log(msg, c=3)
elif WINDOWS and sys.version_info < (3, 8):
or_ffprobe = " or python >= 3.8"
msg = "found FFprobe but your python is too old; need 3.8 or newer"
self.log(msg, c=1)
if not self.usable:
if EXE:
t = "copyparty.exe cannot use mutagen; need ffprobe.exe to read media tags: "
self.log(t + FFMPEG_URL)
return
msg = "need Mutagen{} to read media tags so please run this:\n{}{} -m pip install --user mutagen\n"
pybin = os.path.basename(sys.executable)
self.log(msg.format(or_ffprobe, " " * 37, pybin), c=1)
pyname = os.path.basename(pybin)
self.log(msg.format(or_ffprobe, " " * 37, pyname), c=1)
return
# https://picard-docs.musicbrainz.org/downloads/MusicBrainz_Picard_Tag_Map.html
@@ -338,41 +376,49 @@ class MTag(object):
}
# self.get = self.compare
def log(self, msg, c=0):
def log(self, msg: str, c: Union[int, str] = 0) -> None:
self.log_func("mtag", msg, c)
def normalize_tags(self, ret, md):
for k, v in dict(md).items():
if not v:
def normalize_tags(
self, parser_output: dict[str, tuple[int, Any]], md: dict[str, list[Any]]
) -> dict[str, Union[str, float]]:
for sk, tv in dict(md).items():
if not tv:
continue
k = k.lower().split("::")[0].strip()
mk = self.rmap.get(k)
if not mk:
sk = sk.lower().split("::")[0].strip()
key_mapping = self.rmap.get(sk)
if not key_mapping:
continue
pref, mk = mk
if mk not in ret or ret[mk][0] > pref:
ret[mk] = [pref, v[0]]
priority, alias = key_mapping
if alias not in parser_output or parser_output[alias][0] > priority:
parser_output[alias] = (priority, tv[0])
# take first value
ret = {k: unicode(v[1]).strip() for k, v in ret.items()}
# take first value (lowest priority / most preferred)
ret: dict[str, Union[str, float]] = {
sk: unicode(tv[1]).strip() for sk, tv in parser_output.items()
}
# track 3/7 => track 3
for k, v in ret.items():
if k[0] == ".":
v = v.split("/")[0].strip().lstrip("0")
ret[k] = v or 0
for sk, zv in ret.items():
if sk[0] == ".":
sv = str(zv).split("/")[0].strip().lstrip("0")
ret[sk] = sv or 0
# normalize key notation to rkeobo
okey = ret.get("key")
if okey:
key = okey.replace(" ", "").replace("maj", "").replace("min", "m")
key = str(okey).replace(" ", "").replace("maj", "").replace("min", "m")
ret["key"] = REKOBO_LKEY.get(key.lower(), okey)
if self.args.mtag_vv:
zl = " ".join("\033[36m{} \033[33m{}".format(k, v) for k, v in ret.items())
self.log("norm: {}\033[0m".format(zl), "90")
return ret
def compare(self, abspath):
def compare(self, abspath: str) -> dict[str, Union[str, float]]:
if abspath.endswith(".au"):
return {}
@@ -410,21 +456,34 @@ class MTag(object):
return r1
def get_mutagen(self, abspath):
def get_mutagen(self, abspath: str) -> dict[str, Union[str, float]]:
ret: dict[str, tuple[int, Any]] = {}
if not bos.path.isfile(abspath):
return {}
import mutagen
from mutagen import File
try:
md = mutagen.File(fsenc(abspath), easy=True)
md = File(fsenc(abspath), easy=True)
assert md
if self.args.mtag_vv:
for zd in (md.info.__dict__, dict(md.tags)):
zl = ["\033[36m{} \033[33m{}".format(k, v) for k, v in zd.items()]
self.log("mutagen: {}\033[0m".format(" ".join(zl)), "90")
if not md.info.length and not md.info.codec:
raise Exception()
except Exception as ex:
if self.args.mtag_v:
self.log("mutagen-err [{}] @ [{}]".format(ex, abspath), "90")
return self.get_ffprobe(abspath) if self.can_ffprobe else {}
sz = bos.path.getsize(abspath)
ret = {".q": [0, int((sz / md.info.length) / 128)]}
try:
ret[".q"] = (0, int((sz / md.info.length) / 128))
except:
pass
for attr, k, norm in [
["codec", "ac", unicode],
@@ -455,54 +514,83 @@ class MTag(object):
if k == "ac" and v.startswith("mp4a.40."):
v = "aac"
ret[k] = [0, norm(v)]
ret[k] = (0, norm(v))
return self.normalize_tags(ret, md)
def get_ffprobe(self, abspath):
def get_ffprobe(self, abspath: str) -> dict[str, Union[str, float]]:
if not bos.path.isfile(abspath):
return {}
ret, md = ffprobe(abspath)
ret, md = ffprobe(abspath, self.args.mtag_to)
if self.args.mtag_vv:
for zd in (ret, dict(md)):
zl = ["\033[36m{} \033[33m{}".format(k, v) for k, v in zd.items()]
self.log("ffprobe: {}\033[0m".format(" ".join(zl)), "90")
return self.normalize_tags(ret, md)
def get_bin(self, parsers, abspath):
def get_bin(
self, parsers: dict[str, MParser], abspath: str, oth_tags: dict[str, Any]
) -> dict[str, Any]:
if not bos.path.isfile(abspath):
return {}
pypath = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
pypath = [str(pypath)] + [str(x) for x in sys.path if x]
pypath = str(os.pathsep.join(pypath))
env = os.environ.copy()
env["PYTHONPATH"] = pypath
try:
if EXE:
raise Exception()
ret = {}
for tagname, parser in parsers.items():
pypath = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
zsl = [str(pypath)] + [str(x) for x in sys.path if x]
pypath = str(os.pathsep.join(zsl))
env["PYTHONPATH"] = pypath
except:
if not E.ox and not EXE:
raise
ret: dict[str, Any] = {}
for tagname, parser in sorted(parsers.items(), key=lambda x: (x[1].pri, x[0])):
try:
cmd = [parser.bin, abspath]
if parser.bin.endswith(".py"):
cmd = [sys.executable] + cmd
cmd = [pybin] + cmd
args = {"env": env, "timeout": parser.timeout}
args = {
"env": env,
"timeout": parser.timeout,
"kill": parser.kill,
"capture": parser.capture,
}
if parser.pri:
zd = oth_tags.copy()
zd.update(ret)
args["sin"] = json.dumps(zd).encode("utf-8", "replace")
if WINDOWS:
args["creationflags"] = 0x4000
else:
cmd = ["nice"] + cmd
cmd = [fsenc(x) for x in cmd]
v = sp.check_output(cmd, **args).strip()
bcmd = [sfsenc(x) for x in cmd[:-1]] + [fsenc(cmd[-1])]
rc, v, err = runcmd(bcmd, **args) # type: ignore
retchk(rc, bcmd, err, self.log, 5, self.args.mtag_v)
v = v.strip()
if not v:
continue
if "," not in tagname:
ret[tagname] = v.decode("utf-8")
ret[tagname] = v
else:
v = json.loads(v)
zj = json.loads(v)
for tag in tagname.split(","):
if tag and tag in v:
ret[tag] = v[tag]
if tag and tag in zj:
ret[tag] = zj[tag]
except:
pass
if self.args.mtag_v:
t = "mtag error: tagname {}, parser {}, file {} => {}"
self.log(t.format(tagname, parser.bin, abspath, min_ex()))
return ret

370
copyparty/multicast.py Normal file
View File

@@ -0,0 +1,370 @@
# coding: utf-8
from __future__ import print_function, unicode_literals
import socket
import time
import ipaddress
from ipaddress import (
IPv4Address,
IPv4Network,
IPv6Address,
IPv6Network,
ip_address,
ip_network,
)
from .__init__ import MACOS, TYPE_CHECKING
from .util import Netdev, find_prefix, min_ex, spack
if TYPE_CHECKING:
from .svchub import SvcHub
if True: # pylint: disable=using-constant-test
from typing import Optional, Union
if not hasattr(socket, "IPPROTO_IPV6"):
setattr(socket, "IPPROTO_IPV6", 41)
class NoIPs(Exception):
pass
class MC_Sck(object):
"""there is one socket for each server ip"""
def __init__(
self,
sck: socket.socket,
nd: Netdev,
grp: str,
ip: str,
net: Union[IPv4Network, IPv6Network],
):
self.sck = sck
self.idx = nd.idx
self.name = nd.name
self.grp = grp
self.mreq = b""
self.ip = ip
self.net = net
self.ips = {ip: net}
self.v6 = ":" in ip
self.have4 = ":" not in ip
self.have6 = ":" in ip
class MCast(object):
def __init__(
self,
hub: "SvcHub",
Srv: type[MC_Sck],
on: list[str],
off: list[str],
mc_grp_4: str,
mc_grp_6: str,
port: int,
vinit: bool,
) -> None:
"""disable ipv%d by setting mc_grp_%d empty"""
self.hub = hub
self.Srv = Srv
self.args = hub.args
self.asrv = hub.asrv
self.log_func = hub.log
self.on = on
self.off = off
self.grp4 = mc_grp_4
self.grp6 = mc_grp_6
self.port = port
self.vinit = vinit
self.srv: dict[socket.socket, MC_Sck] = {} # listening sockets
self.sips: set[str] = set() # all listening ips (including failed attempts)
self.ll_ok: set[str] = set() # fallback linklocal IPv4 and IPv6 addresses
self.b2srv: dict[bytes, MC_Sck] = {} # binary-ip -> server socket
self.b4: list[bytes] = [] # sorted list of binary-ips
self.b6: list[bytes] = [] # sorted list of binary-ips
self.cscache: dict[str, Optional[MC_Sck]] = {} # client ip -> server cache
self.running = True
def log(self, msg: str, c: Union[int, str] = 0) -> None:
self.log_func("multicast", msg, c)
def create_servers(self) -> list[str]:
bound: list[str] = []
netdevs = self.hub.tcpsrv.netdevs
ips = [x[0] for x in self.hub.tcpsrv.bound]
if "::" in ips:
ips = [x for x in ips if x != "::"] + list(
[x.split("/")[0] for x in netdevs if ":" in x]
)
ips.append("0.0.0.0")
if "0.0.0.0" in ips:
ips = [x for x in ips if x != "0.0.0.0"] + list(
[x.split("/")[0] for x in netdevs if ":" not in x]
)
ips = [x for x in ips if x not in ("::1", "127.0.0.1")]
ips = find_prefix(ips, netdevs)
on = self.on[:]
off = self.off[:]
for lst in (on, off):
for av in list(lst):
try:
arg_net = ip_network(av, False)
except:
arg_net = None
for sk, sv in netdevs.items():
if arg_net:
net_ip = ip_address(sk.split("/")[0])
if net_ip in arg_net and sk not in lst:
lst.append(sk)
if (av == str(sv.idx) or av == sv.name) and sk not in lst:
lst.append(sk)
if on:
ips = [x for x in ips if x in on]
elif off:
ips = [x for x in ips if x not in off]
if not self.grp4:
ips = [x for x in ips if ":" in x]
if not self.grp6:
ips = [x for x in ips if ":" not in x]
ips = list(set(ips))
all_selected = ips[:]
# discard non-linklocal ipv6
ips = [x for x in ips if ":" not in x or x.startswith("fe80")]
if not ips:
raise NoIPs()
for ip in ips:
v6 = ":" in ip
netdev = netdevs[ip]
if not netdev.idx:
t = "using INADDR_ANY for ip [{}], netdev [{}]"
if not self.srv and ip not in ["::", "0.0.0.0"]:
self.log(t.format(ip, netdev), 3)
ipv = socket.AF_INET6 if v6 else socket.AF_INET
sck = socket.socket(ipv, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sck.settimeout(None)
sck.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
sck.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
except:
pass
# most ipv6 clients expect multicast on linklocal ip only;
# add a/aaaa records for the other nic IPs
other_ips: set[str] = set()
if v6:
for nd in netdevs.values():
if nd.idx == netdev.idx and nd.ip in all_selected and ":" in nd.ip:
other_ips.add(nd.ip)
net = ipaddress.ip_network(ip, False)
ip = ip.split("/")[0]
srv = self.Srv(sck, netdev, self.grp6 if ":" in ip else self.grp4, ip, net)
for oth_ip in other_ips:
srv.ips[oth_ip.split("/")[0]] = ipaddress.ip_network(oth_ip, False)
# gvfs breaks if a linklocal ip appears in a dns reply
ll = {
k: v
for k, v in srv.ips.items()
if k.startswith("169.254") or k.startswith("fe80")
}
rt = {k: v for k, v in srv.ips.items() if k not in ll}
if self.args.ll or not rt:
self.ll_ok.update(list(ll))
if not self.args.ll:
srv.ips = rt or ll
if not srv.ips:
self.log("no IPs on {}; skipping [{}]".format(netdev, ip), 3)
continue
try:
self.setup_socket(srv)
self.srv[sck] = srv
bound.append(ip)
except:
t = "announce failed on {} [{}]:\n{}"
self.log(t.format(netdev, ip, min_ex()), 3)
if self.args.zm_msub:
for s1 in self.srv.values():
for s2 in self.srv.values():
if s1.idx != s2.idx:
continue
if s1.ip not in s2.ips:
s2.ips[s1.ip] = s1.net
if self.args.zm_mnic:
for s1 in self.srv.values():
for s2 in self.srv.values():
for ip1, net1 in list(s1.ips.items()):
for ip2, net2 in list(s2.ips.items()):
if net1 == net2 and ip1 != ip2:
s1.ips[ip2] = net2
self.sips = set([x.split("/")[0] for x in all_selected])
for srv in self.srv.values():
assert srv.ip in self.sips
return bound
def setup_socket(self, srv: MC_Sck) -> None:
    """bind srv's socket to the multicast group and set the outgoing
    interface + TTL/LOOP options; also registers the socket's IPs in
    self.b2srv so inbound packets can be mapped back to this srv
    """
    sck = srv.sck
    if srv.v6:
        if self.vinit:
            zsl = list(srv.ips.keys())
            self.log("v6({}) idx({}) {}".format(srv.ip, srv.idx, zsl), 6)

        # index every v6 ip on this nic for rx-packet mapping
        for ip in srv.ips:
            bip = socket.inet_pton(socket.AF_INET6, ip)
            self.b2srv[bip] = srv
            self.b6.append(bip)

        grp = self.grp6 if srv.idx else ""
        try:
            # binding the group addr fails on macos; fall back to wildcard
            if MACOS:
                raise Exception()

            sck.bind((grp, self.port, 0, srv.idx))
        except:
            sck.bind(("", self.port, 0, srv.idx))

        # mreq = group-addr + interface-index (native uint32)
        bgrp = socket.inet_pton(socket.AF_INET6, self.grp6)
        dev = spack(b"@I", srv.idx)
        srv.mreq = bgrp + dev
        if srv.idx != socket.INADDR_ANY:
            sck.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_IF, dev)

        try:
            sck.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, 255)
            sck.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_LOOP, 1)
        except:
            # macos
            t = "failed to set IPv6 TTL/LOOP; announcements may not survive multiple switches/routers"
            self.log(t, 3)
    else:
        if self.vinit:
            self.log("v4({}) idx({})".format(srv.ip, srv.idx), 6)

        bip = socket.inet_aton(srv.ip)
        self.b2srv[bip] = srv
        self.b4.append(bip)

        grp = self.grp4 if srv.idx else ""
        try:
            # same macos workaround as the v6 branch above
            if MACOS:
                raise Exception()

            sck.bind((grp, self.port))
        except:
            sck.bind(("", self.port))

        # mreq = group-addr + local interface addr (or INADDR_ANY)
        bgrp = socket.inet_aton(self.grp4)
        dev = (
            spack(b"=I", socket.INADDR_ANY)
            if srv.idx == socket.INADDR_ANY
            else socket.inet_aton(srv.ip)
        )
        srv.mreq = bgrp + dev
        if srv.idx != socket.INADDR_ANY:
            sck.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF, dev)

        try:
            sck.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 255)
            sck.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, 1)
        except:
            # probably can't happen but dontcare if it does
            t = "failed to set IPv4 TTL/LOOP; announcements may not survive multiple switches/routers"
            self.log(t, 3)

    # join (or rejoin) the multicast group
    self.hop(srv)

    # longest prefixes first so lookups prefer the most specific match
    self.b4.sort(reverse=True)
    self.b6.sort(reverse=True)
def hop(self, srv: MC_Sck) -> None:
    """rejoin to keepalive on routers/switches without igmp-snooping"""
    sck = srv.sck
    req = srv.mreq
    if ":" in srv.ip:
        # ipv6: leave then re-join the group
        try:
            sck.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_LEAVE_GROUP, req)
            # linux does leaves/joins twice with 0.2~1.05s spacing
            time.sleep(1.2)
        except:
            pass

        sck.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, req)
    else:
        # ipv4: same dance with the v4 option names
        try:
            sck.setsockopt(socket.IPPROTO_IP, socket.IP_DROP_MEMBERSHIP, req)
            time.sleep(1.2)
        except:
            pass

        # t = "joining {} from ip {} idx {} with mreq {}"
        # self.log(t.format(srv.grp, srv.ip, srv.idx, repr(srv.mreq)), 6)
        sck.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, req)
def map_client(self, cip: str) -> Optional[MC_Sck]:
    """find the server socket whose subnet contains client ip `cip`;
    results (including misses) are cached in self.cscache
    """
    try:
        return self.cscache[cip]
    except:
        pass

    hit: Optional[MC_Sck] = None
    is_v6 = ":" in cip
    addr = IPv6Address(cip) if is_v6 else IPv4Address(cip)

    # binary-ip index is sorted longest-prefix first; take first subnet match
    for bip in self.b6 if is_v6 else self.b4:
        cand = self.b2srv[bip]
        if any(addr in net for net in cand.ips.values()):
            hit = cand
            break

    if not hit and cip in ("127.0.0.1", "::1"):
        # just give it something
        hit = list(self.srv.values())[0]

    if not hit and cip.startswith("169.254"):
        # idk how to map LL IPv4 msgs to nics;
        # just pick one and hope for the best
        for cand in self.srv.values():
            if any(y in self.ll_ok for y in cand.ips):
                hit = cand
                break

    if hit:
        t = "new client on {} ({}): {}"
        self.log(t.format(hit.name, hit.net, cip), 6)
    else:
        t = "could not map client {} to known subnet; maybe forwarded from another network?"
        self.log(t.format(cip), 3)

    # crude cap so the cache cannot grow without bound
    if len(self.cscache) > 9000:
        self.cscache = {}

    self.cscache[cip] = hit
    return hit

337
copyparty/smbd.py Normal file
View File

@@ -0,0 +1,337 @@
# coding: utf-8
import inspect
import logging
import os
import random
import stat
import sys
import time
from types import SimpleNamespace
from .__init__ import ANYWIN, EXE, TYPE_CHECKING
from .authsrv import LEELOO_DALLAS, VFS
from .bos import bos
from .util import Daemon, min_ex, pybin, runhook
if True: # pylint: disable=using-constant-test
from typing import Any, Union
if TYPE_CHECKING:
from .svchub import SvcHub
lg = logging.getLogger("smb")
debug, info, warning, error = (lg.debug, lg.info, lg.warning, lg.error)
class SMB(object):
    """SMB server: bridges impacket's smbserver onto copyparty's VFS by
    monkey-patching smbserver's os/os.path bindings with vfs-aware shims
    """

    def __init__(self, hub: "SvcHub") -> None:
        self.hub = hub
        self.args = hub.args
        self.asrv = hub.asrv
        self.log = hub.log
        # fd => (open-time, vpath) for write-opened files, so _close
        # can kick off hashing when the upload finishes
        self.files: dict[int, tuple[float, str]] = {}

        lg.setLevel(logging.DEBUG if self.args.smbvvv else logging.INFO)
        for x in ["impacket", "impacket.smbserver"]:
            lgr = logging.getLogger(x)
            lgr.setLevel(logging.DEBUG if self.args.smbvv else logging.INFO)

        try:
            from impacket import smbserver
            from impacket.ntlm import compute_lmhash, compute_nthash
        except ImportError:
            if EXE:
                print("copyparty.exe cannot do SMB")
                sys.exit(1)

            m = "\033[36m\n{}\033[31m\n\nERROR: need 'impacket'; please run this command:\033[33m\n {} -m pip install --user impacket\n\033[0m"
            print(m.format(min_ex(), pybin))
            sys.exit(1)

        # patch vfs into smbserver.os
        fos = SimpleNamespace()
        for k in os.__dict__:
            try:
                setattr(fos, k, getattr(os, k))
            except:
                pass
        fos.close = self._close
        fos.listdir = self._listdir
        fos.mkdir = self._mkdir
        fos.open = self._open
        fos.remove = self._unlink
        fos.rename = self._rename
        fos.stat = self._stat
        fos.unlink = self._unlink
        fos.utime = self._utime
        smbserver.os = fos

        # ...and smbserver.os.path
        fop = SimpleNamespace()
        for k in os.path.__dict__:
            try:
                setattr(fop, k, getattr(os.path, k))
            except:
                pass
        fop.exists = self._p_exists
        fop.getsize = self._p_getsize
        fop.isdir = self._p_isdir
        smbserver.os.path = fop

        if not self.args.smb_nwa_2:
            fop.join = self._p_join

        # other patches
        smbserver.isInFileJail = self._is_in_file_jail
        self._disarm()

        # impacket cannot listen on v6; pick the first v4 addr or wildcard
        ip = next((x for x in self.args.i if ":" not in x), None)
        if not ip:
            self.log("smb", "IPv6 not supported for SMB; listening on 0.0.0.0", 3)
            ip = "0.0.0.0"

        port = int(self.args.smb_port)
        srv = smbserver.SimpleSMBServer(listenAddress=ip, listenPort=port)

        ro = "no" if self.args.smbw else "yes"  # (does nothing)
        srv.addShare("A", "/", readOnly=ro)
        srv.setSMB2Support(not self.args.smb1)

        # register each account twice: (user, pwd) and (pwd, "k") so the
        # password can also be given as the username
        for name, pwd in self.asrv.acct.items():
            for u, p in ((name, pwd), (pwd, "k")):
                lmhash = compute_lmhash(p)
                nthash = compute_nthash(p)
                srv.addCredential(u, 0, lmhash, nthash)

        # random 8-byte ntlm challenge
        chi = [random.randint(0, 255) for x in range(8)]
        cha = "".join(["{:02x}".format(x) for x in chi])
        srv.setSMBChallenge(cha)

        self.srv = srv
        self.stop = srv.stop
        self.log("smb", "listening @ {}:{}".format(ip, port))

    def nlog(self, msg: str, c: Union[int, str] = 0) -> None:
        # named logger passed to runhook
        self.log("smb", msg, c)

    def start(self) -> None:
        # run impacket's accept-loop in a background thread
        Daemon(self.srv.start)

    def _v2a(self, caller: str, vpath: str, *a: Any) -> tuple[VFS, str]:
        """map an smb vpath to (vfs-node, absolute fs-path)"""
        vpath = vpath.replace("\\", "/").lstrip("/")
        # cf = inspect.currentframe().f_back
        # c1 = cf.f_back.f_code.co_name
        # c2 = cf.f_code.co_name
        debug('%s("%s", %s)\033[K\033[0m', caller, vpath, str(a))

        # TODO find a way to grab `identity` in smbComSessionSetupAndX and smb2SessionSetup
        vfs, rem = self.asrv.vfs.get(vpath, LEELOO_DALLAS, True, True)
        return vfs, vfs.canonical(rem)

    def _listdir(self, vpath: str, *a: Any, **ka: Any) -> list[str]:
        """list a directory; virtual dirs first, then dirs, then files"""
        vpath = vpath.replace("\\", "/").lstrip("/")
        # caller = inspect.currentframe().f_back.f_code.co_name
        debug('listdir("%s", %s)\033[K\033[0m', vpath, str(a))
        vfs, rem = self.asrv.vfs.get(vpath, LEELOO_DALLAS, False, False)
        _, vfs_ls, vfs_virt = vfs.ls(
            rem, LEELOO_DALLAS, not self.args.no_scandir, [[False, False]]
        )
        dirs = [x[0] for x in vfs_ls if stat.S_ISDIR(x[1].st_mode)]
        fils = [x[0] for x in vfs_ls if x[0] not in dirs]
        ls = list(vfs_virt.keys()) + dirs + fils
        if self.args.smb_nwa_1:
            return ls

        # clients crash somewhere around 65760 byte
        ret = []
        sz = 112 * 2  # ['.', '..']
        for n, fn in enumerate(ls):
            if sz >= 64000:
                t = "listing only %d of %d files (%d byte); see impacket#1433"
                warning(t, n, len(ls), sz)
                break

            # estimated wire size: fixed header + utf-16 name rounded to 8
            nsz = len(fn.encode("utf-16", "replace"))
            nsz = ((nsz + 7) // 8) * 8
            sz += 104 + nsz
            ret.append(fn)

        return ret

    def _open(
        self, vpath: str, flags: int, *a: Any, chmod: int = 0o777, **ka: Any
    ) -> Any:
        """open a file; anything other than a plain read-only open
        counts as a write and is permission-checked accordingly
        """
        f_ro = os.O_RDONLY
        if ANYWIN:
            f_ro |= os.O_BINARY

        wr = flags != f_ro
        if wr and not self.args.smbw:
            yeet("blocked write (no --smbw): " + vpath)

        vfs, ap = self._v2a("open", vpath, *a)
        if wr:
            if not vfs.axs.uwrite:
                yeet("blocked write (no-write-acc): " + vpath)

            xbu = vfs.flags.get("xbu")
            if xbu and not runhook(
                self.nlog, xbu, ap, vpath, "", "", 0, 0, "1.7.6.2", 0, ""
            ):
                yeet("blocked by xbu server config: " + vpath)

        ret = bos.open(ap, flags, *a, mode=chmod, **ka)
        if wr:
            now = time.time()
            nf = len(self.files)
            if nf > 9000:
                # evict the older half of the tracked fds
                oldest = min([x[0] for x in self.files.values()])
                cutoff = oldest + (now - oldest) / 2
                self.files = {k: v for k, v in self.files.items() if v[0] > cutoff}
                info("was tracking %d files, now %d", nf, len(self.files))

            vpath = vpath.replace("\\", "/").lstrip("/")
            self.files[ret] = (now, vpath)

        return ret

    def _close(self, fd: int) -> None:
        """close fd; if it was a tracked write, hand the file to up2k for hashing"""
        os.close(fd)
        if fd not in self.files:
            return

        _, vp = self.files.pop(fd)
        vp, fn = os.path.split(vp)
        vfs, rem = self.hub.asrv.vfs.get(vp, LEELOO_DALLAS, False, True)
        vfs, rem = vfs.get_dbv(rem)
        self.hub.up2k.hash_file(
            vfs.realpath,
            vfs.vpath,
            vfs.flags,
            rem,
            fn,
            "1.7.6.2",
            time.time(),
            "",
        )

    def _rename(self, vp1: str, vp2: str) -> None:
        """move vp1 to vp2; needs write-access on dest and move-access on src"""
        if not self.args.smbw:
            yeet("blocked rename (no --smbw): " + vp1)

        vp1 = vp1.lstrip("/")
        vp2 = vp2.lstrip("/")

        vfs2, ap2 = self._v2a("rename", vp2, vp1)
        if not vfs2.axs.uwrite:
            yeet("blocked rename (no-write-acc): " + vp2)

        vfs1, _ = self.asrv.vfs.get(vp1, LEELOO_DALLAS, True, True)
        if not vfs1.axs.umove:
            yeet("blocked rename (no-move-acc): " + vp1)

        self.hub.up2k.handle_mv(LEELOO_DALLAS, vp1, vp2)
        try:
            bos.makedirs(ap2)
        except:
            pass

    def _mkdir(self, vpath: str) -> None:
        if not self.args.smbw:
            yeet("blocked mkdir (no --smbw): " + vpath)

        vfs, ap = self._v2a("mkdir", vpath)
        if not vfs.axs.uwrite:
            yeet("blocked mkdir (no-write-acc): " + vpath)

        return bos.mkdir(ap)

    def _stat(self, vpath: str, *a: Any, **ka: Any) -> os.stat_result:
        return bos.stat(self._v2a("stat", vpath, *a)[1], *a, **ka)

    def _unlink(self, vpath: str) -> None:
        """delete a file through up2k so the db stays in sync"""
        if not self.args.smbw:
            yeet("blocked delete (no --smbw): " + vpath)

        # return bos.unlink(self._v2a("stat", vpath, *a)[1])
        vfs, ap = self._v2a("delete", vpath)
        if not vfs.axs.udel:
            yeet("blocked delete (no-del-acc): " + vpath)

        vpath = vpath.replace("\\", "/").lstrip("/")
        self.hub.up2k.handle_rm(LEELOO_DALLAS, "1.7.6.2", [vpath], [])

    def _utime(self, vpath: str, times: tuple[float, float]) -> None:
        if not self.args.smbw:
            yeet("blocked utime (no --smbw): " + vpath)

        vfs, ap = self._v2a("utime", vpath)
        if not vfs.axs.uwrite:
            yeet("blocked utime (no-write-acc): " + vpath)

        return bos.utime(ap, times)

    def _p_exists(self, vpath: str) -> bool:
        try:
            bos.stat(self._v2a("p.exists", vpath)[1])
            return True
        except:
            return False

    def _p_getsize(self, vpath: str) -> int:
        st = bos.stat(self._v2a("p.getsize", vpath)[1])
        return st.st_size

    def _p_isdir(self, vpath: str) -> bool:
        try:
            st = bos.stat(self._v2a("p.isdir", vpath)[1])
            return stat.S_ISDIR(st.st_mode)
        except:
            return False

    def _p_join(self, *a) -> str:
        # impacket.smbserver reads globs from queryDirectoryRequest['Buffer']
        # where somehow `fds.*` becomes `fds"*` so lets fix that
        ret = os.path.join(*a)
        return ret.replace('"', ".")  # type: ignore

    def _hook(self, *a: Any, **ka: Any) -> None:
        """trap for smbserver os-calls that must never reach the real fs"""
        src = inspect.currentframe().f_back.f_code.co_name
        error("\033[31m%s:hook(%s)\033[0m", src, a)
        raise Exception("nope")

    def _disarm(self) -> None:
        """replace every dangerous smbserver os/os.path binding with _hook"""
        from impacket import smbserver

        smbserver.os.chmod = self._hook
        smbserver.os.chown = self._hook
        smbserver.os.ftruncate = self._hook
        smbserver.os.lchown = self._hook
        smbserver.os.link = self._hook
        smbserver.os.lstat = self._hook
        smbserver.os.replace = self._hook
        smbserver.os.scandir = self._hook
        smbserver.os.symlink = self._hook
        smbserver.os.truncate = self._hook
        smbserver.os.walk = self._hook

        smbserver.os.path.abspath = self._hook
        smbserver.os.path.expanduser = self._hook
        smbserver.os.path.getatime = self._hook
        smbserver.os.path.getctime = self._hook
        smbserver.os.path.getmtime = self._hook
        smbserver.os.path.isabs = self._hook
        smbserver.os.path.isfile = self._hook
        smbserver.os.path.islink = self._hook
        smbserver.os.path.realpath = self._hook

    def _is_in_file_jail(self, *a: Any) -> bool:
        # handled by vfs
        return True
def yeet(msg: str) -> None:
    """log a blocked smb operation, then abort it by raising"""
    info(msg)
    raise Exception(msg)

210
copyparty/ssdp.py Normal file
View File

@@ -0,0 +1,210 @@
# coding: utf-8
from __future__ import print_function, unicode_literals
import re
import select
import socket
from email.utils import formatdate
from .__init__ import TYPE_CHECKING
from .multicast import MC_Sck, MCast
from .util import CachedSet, html_escape, min_ex
if TYPE_CHECKING:
from .broker_util import BrokerCli
from .httpcli import HttpCli
from .svchub import SvcHub
if True: # pylint: disable=using-constant-test
from typing import Optional, Union
GRP = "239.255.255.250"
class SSDP_Sck(MC_Sck):
    """multicast socket which also remembers the http port to announce"""

    def __init__(self, *a):
        super(SSDP_Sck, self).__init__(*a)
        # filled in by SSDPd.run once the matching tcp listener is found
        self.hport = 0
class SSDPr(object):
    """generates http responses for httpcli"""

    def __init__(self, broker: "BrokerCli") -> None:
        self.broker = broker
        self.args = broker.args

    def reply(self, hc: "HttpCli") -> bool:
        # only the device-description xml is served; anything else is a 400
        if hc.vpath.endswith("device.xml"):
            return self.tx_device(hc)

        hc.reply(b"unknown request", 400)
        return False

    def tx_device(self, hc: "HttpCli") -> bool:
        """send the upnp device-description xml for this server"""
        zs = """
<?xml version="1.0"?>
<root xmlns="urn:schemas-upnp-org:device-1-0">
<specVersion>
<major>1</major>
<minor>0</minor>
</specVersion>
<URLBase>{}</URLBase>
<device>
<presentationURL>{}</presentationURL>
<deviceType>urn:schemas-upnp-org:device:Basic:1</deviceType>
<friendlyName>{}</friendlyName>
<modelDescription>file server</modelDescription>
<manufacturer>ed</manufacturer>
<manufacturerURL>https://ocv.me/</manufacturerURL>
<modelName>copyparty</modelName>
<modelURL>https://github.com/9001/copyparty/</modelURL>
<UDN>{}</UDN>
<serviceList>
<service>
<serviceType>urn:schemas-upnp-org:device:Basic:1</serviceType>
<serviceId>urn:schemas-upnp-org:device:Basic</serviceId>
<controlURL>/.cpr/ssdp/services.xml</controlURL>
<eventSubURL>/.cpr/ssdp/services.xml</eventSubURL>
<SCPDURL>/.cpr/ssdp/services.xml</SCPDURL>
</service>
</serviceList>
</device>
</root>"""

        c = html_escape
        sip, sport = hc.s.getsockname()[:2]
        sip = sip.replace("::ffff:", "")  # strip v4-mapped-v6 prefix
        proto = "https" if self.args.https_only else "http"
        ubase = "{}://{}:{}".format(proto, sip, sport)
        zsl = self.args.zsl
        # --zsl may be a full url or a path relative to this server
        url = zsl if "://" in zsl else ubase + "/" + zsl.lstrip("/")
        name = "{} @ {}".format(self.args.doctitle, self.args.name)
        zs = zs.strip().format(c(ubase), c(url), c(name), c(self.args.zsid))
        hc.reply(zs.encode("utf-8", "replace"))
        return False  # close connection
class SSDPd(MCast):
    """communicates with ssdp clients over multicast"""

    def __init__(self, hub: "SvcHub", ngen: int) -> None:
        al = hub.args
        vinit = al.zsv and not al.zmv
        super(SSDPd, self).__init__(
            hub, SSDP_Sck, al.zs_on, al.zs_off, GRP, "", 1900, vinit
        )
        self.srv: dict[socket.socket, SSDP_Sck] = {}
        self.logsrc = "SSDP-{}".format(ngen)
        self.ngen = ngen

        # rxc dedups inbound searches; txc rate-limits reply-logging
        self.rxc = CachedSet(0.7)
        self.txc = CachedSet(5)  # win10: every 3 sec
        # matches the Search-Target header of rootdevice queries
        self.ptn_st = re.compile(b"\nst: *upnp:rootdevice", re.I)

    def log(self, msg: str, c: Union[int, str] = 0) -> None:
        self.log_func(self.logsrc, msg, c)

    def run(self) -> None:
        """bind sockets, resolve http ports, then serve M-SEARCH replies"""
        try:
            bound = self.create_servers()
        except:
            t = "no server IP matches the ssdp config\n{}"
            self.log(t.format(min_ex()), 1)
            bound = []

        if not bound:
            self.log("failed to announce copyparty services on the network", 3)
            return

        # find http port for this listening ip
        for srv in self.srv.values():
            tcps = self.hub.tcpsrv.bound
            hp = next((x[1] for x in tcps if x[0] in ("0.0.0.0", srv.ip)), 0)
            hp = hp or next((x[1] for x in tcps if x[0] == "::"), 0)
            if not hp:
                hp = tcps[0][1]
                self.log("assuming port {} for {}".format(hp, srv.ip), 3)
            srv.hport = hp

        self.log("listening")
        while self.running:
            rdy = select.select(self.srv, [], [], self.args.z_chk or 180)
            rx: list[socket.socket] = rdy[0]  # type: ignore
            self.rxc.cln()
            buf = b""
            addr = ("0", 0)
            for sck in rx:
                try:
                    buf, addr = sck.recvfrom(4096)
                    self.eat(buf, addr)
                except:
                    if not self.running:
                        break

                    t = "{} {} \033[33m|{}| {}\n{}".format(
                        self.srv[sck].name, addr, len(buf), repr(buf)[2:-1], min_ex()
                    )
                    self.log(t, 6)

        self.log("stopped", 2)

    def stop(self) -> None:
        self.running = False
        for srv in self.srv.values():
            try:
                srv.sck.close()
            except:
                pass

        self.srv = {}

    def eat(self, buf: bytes, addr: tuple[str, int]) -> None:
        """reply to one rootdevice M-SEARCH datagram from (buf, addr)"""
        cip = addr[0]
        if cip.startswith("169.254") and not self.ll_ok:
            return

        # drop recently-seen duplicates
        if buf in self.rxc.c:
            return

        srv: Optional[SSDP_Sck] = self.map_client(cip)  # type: ignore
        if not srv:
            return

        self.rxc.add(buf)
        if not buf.startswith(b"M-SEARCH * HTTP/1."):
            return

        if not self.ptn_st.search(buf):
            return

        if self.args.zsv:
            t = "{} [{}] \033[36m{} \033[0m|{}|"
            self.log(t.format(srv.name, srv.ip, cip, len(buf)), "90")

        zs = """
HTTP/1.1 200 OK
CACHE-CONTROL: max-age=1800
DATE: {0}
EXT:
LOCATION: http://{1}:{2}/.cpr/ssdp/device.xml
OPT: "http://schemas.upnp.org/upnp/1/0/"; ns=01
01-NLS: {3}
SERVER: UPnP/1.0
ST: upnp:rootdevice
USN: {3}::upnp:rootdevice
BOOTID.UPNP.ORG: 0
CONFIGID.UPNP.ORG: 1
"""
        v4 = srv.ip.replace("::ffff:", "")
        zs = zs.format(formatdate(usegmt=True), v4, srv.hport, self.args.zsid)
        # ssdp wants CRLF line endings; [1:] drops the leading newline
        zb = zs[1:].replace("\n", "\r\n").encode("utf-8", "replace")
        srv.sck.sendto(zb, addr[:2])

        if cip not in self.txc.c:
            self.log("{} [{}] --> {}".format(srv.name, srv.ip, cip), "6")

        self.txc.add(cip)
        self.txc.cln()

View File

@@ -1,23 +1,30 @@
# coding: utf-8
from __future__ import print_function, unicode_literals
import stat
import tarfile
import threading
from .sutil import errdesc
from .util import Queue, fsenc, min_ex
from queue import Queue
from .bos import bos
from .sutil import StreamArc, errdesc
from .util import Daemon, fsenc, min_ex
if True: # pylint: disable=using-constant-test
from typing import Any, Generator, Optional
from .util import NamedLogger
class QFile(object):
class QFile(object): # inherit io.StringIO for painful typing
"""file-like object which buffers writes into a queue"""
def __init__(self):
self.q = Queue(64)
self.bq = []
def __init__(self) -> None:
self.q: Queue[Optional[bytes]] = Queue(64)
self.bq: list[bytes] = []
self.nq = 0
def write(self, buf):
def write(self, buf: Optional[bytes]) -> None:
if buf is None or self.nq >= 240 * 1024:
self.q.put(b"".join(self.bq))
self.bq = []
@@ -30,44 +37,52 @@ class QFile(object):
self.nq += len(buf)
class StreamTar(object):
class StreamTar(StreamArc):
"""construct in-memory tar file from the given path"""
def __init__(self, log, fgen, **kwargs):
def __init__(
self,
log: "NamedLogger",
fgen: Generator[dict[str, Any], None, None],
**kwargs: Any
):
super(StreamTar, self).__init__(log, fgen)
self.ci = 0
self.co = 0
self.qfile = QFile()
self.log = log
self.fgen = fgen
self.errf = None
self.errf: dict[str, Any] = {}
# python 3.8 changed to PAX_FORMAT as default,
# waste of space and don't care about the new features
fmt = tarfile.GNU_FORMAT
self.tar = tarfile.open(fileobj=self.qfile, mode="w|", format=fmt)
self.tar = tarfile.open(fileobj=self.qfile, mode="w|", format=fmt) # type: ignore
w = threading.Thread(target=self._gen, name="star-gen")
w.daemon = True
w.start()
Daemon(self._gen, "star-gen")
def gen(self):
while True:
buf = self.qfile.q.get()
if not buf:
break
def gen(self) -> Generator[Optional[bytes], None, None]:
try:
while True:
buf = self.qfile.q.get()
if not buf:
break
self.co += len(buf)
yield buf
self.co += len(buf)
yield buf
yield None
if self.errf:
bos.unlink(self.errf["ap"])
yield None
finally:
if self.errf:
bos.unlink(self.errf["ap"])
def ser(self, f):
def ser(self, f: dict[str, Any]) -> None:
name = f["vp"]
src = f["ap"]
fsi = f["st"]
if stat.S_ISDIR(fsi.st_mode):
return
inf = tarfile.TarInfo(name=name)
inf.mode = fsi.st_mode
inf.size = fsi.st_size
@@ -76,21 +91,21 @@ class StreamTar(object):
inf.gid = 0
self.ci += inf.size
with open(fsenc(src), "rb", 512 * 1024) as f:
self.tar.addfile(inf, f)
with open(fsenc(src), "rb", 512 * 1024) as fo:
self.tar.addfile(inf, fo)
def _gen(self):
def _gen(self) -> None:
errors = []
for f in self.fgen:
if "err" in f:
errors.append([f["vp"], f["err"]])
errors.append((f["vp"], f["err"]))
continue
try:
self.ser(f)
except Exception:
except:
ex = min_ex(5, True).replace("\n", "\n-- ")
errors.append([f["vp"], ex])
errors.append((f["vp"], ex))
if errors:
self.errf, txt = errdesc(errors)

View File

@@ -0,0 +1,5 @@
`dnslib` but heavily simplified/feature-stripped
L: MIT
Copyright (c) 2010 - 2017 Paul Chakravarti
https://github.com/paulc/dnslib/

View File

@@ -0,0 +1,11 @@
# coding: utf-8
"""
L: MIT
Copyright (c) 2010 - 2017 Paul Chakravarti
https://github.com/paulc/dnslib/tree/0.9.23
"""
from .dns import *
version = "0.9.23"

View File

@@ -0,0 +1,41 @@
# coding: utf-8
import types
class BimapError(Exception):
    """raised on failed Bimap lookups (when passed as the error handler)"""
    pass
class Bimap(object):
    """two-way mapping between codes and names.

    forward lookup is `bimap[code] -> name`, reverse is `bimap.NAME -> code`.
    `error` is either an exception class to raise on a miss, or a plain
    function(name, key, forward) whose return value is used instead.
    """

    def __init__(self, name, forward, error=AttributeError):
        self.name = name
        self.error = error
        self.forward = forward.copy()
        self.reverse = {v: k for (k, v) in forward.items()}

    def get(self, k, default=None):
        """forward lookup; falls back to `default` or str(k) on a miss"""
        try:
            return self.forward[k]
        except KeyError:
            return default or str(k)

    def __getitem__(self, k):
        try:
            return self.forward[k]
        except KeyError:
            if not isinstance(self.error, types.FunctionType):
                raise self.error("%s: Invalid forward lookup: [%s]" % (self.name, k))
            return self.error(self.name, k, True)

    def __getattr__(self, k):
        try:
            # keep functools/inspect from unwrapping us via __wrapped__
            if k == "__wrapped__":
                raise AttributeError()
            return self.reverse[k]
        except KeyError:
            if not isinstance(self.error, types.FunctionType):
                raise self.error("%s: Invalid reverse lookup: [%s]" % (self.name, k))
            return self.error(self.name, k, False)

View File

@@ -0,0 +1,15 @@
# coding: utf-8
from __future__ import print_function
def get_bits(data, offset, bits=1):
    """extract `bits` bits from `data`, starting at bit position `offset`"""
    return (data >> offset) & ((1 << bits) - 1)
def set_bits(data, value, offset, bits=1):
    """return a 16-bit `data` with the `bits`-wide field at `offset`
    replaced by `value` (extra high bits of `value` are masked off)
    """
    field = ((1 << bits) - 1) << offset
    kept = data & (0xFFFF ^ field)
    return kept | ((value << offset) & field)

View File

@@ -0,0 +1,56 @@
# coding: utf-8
import binascii
import struct
class BufferError(Exception):
    """raised by Buffer on underruns and struct decode failures
    (deliberately shadows the builtin of the same name in this module)
    """
    pass
class Buffer(object):
    """growable byte buffer with a single read/write cursor (`offset`)"""

    def __init__(self, data=b""):
        self.data = bytearray(data)
        self.offset = 0

    def remaining(self):
        """bytes left between the cursor and the end of the buffer"""
        return len(self.data) - self.offset

    def get(self, length):
        """consume and return `length` bytes; BufferError on underrun"""
        if length > self.remaining():
            raise BufferError(
                "Not enough bytes [offset=%d,remaining=%d,requested=%d]"
                % (self.offset, self.remaining(), length)
            )
        pos = self.offset
        self.offset = pos + length
        return bytes(self.data[pos : pos + length])

    def hex(self):
        """hex dump of the whole buffer (ignores the cursor)"""
        return binascii.hexlify(self.data)

    def pack(self, fmt, *args):
        """append struct-packed values, advancing the cursor"""
        self.offset += struct.calcsize(fmt)
        self.data += struct.pack(fmt, *args)

    def append(self, s):
        """append raw bytes, advancing the cursor"""
        self.offset += len(s)
        self.data += s

    def update(self, ptr, fmt, *args):
        """overwrite previously-written bytes at absolute position `ptr`"""
        patch = struct.pack(fmt, *args)
        self.data[ptr : ptr + len(patch)] = patch

    def unpack(self, fmt):
        """consume and decode struct-formatted values at the cursor"""
        try:
            raw = self.get(struct.calcsize(fmt))
            return struct.unpack(fmt, raw)
        except struct.error:
            raise BufferError(
                "Error unpacking struct '%s' <%s>"
                % (fmt, binascii.hexlify(raw).decode())
            )

    def __len__(self):
        return len(self.data)

View File

@@ -0,0 +1,775 @@
# coding: utf-8
from __future__ import print_function
import binascii
from itertools import chain
from .bimap import Bimap, BimapError
from .bit import get_bits, set_bits
from .buffer import BufferError
from .label import DNSBuffer, DNSLabel
from .ranges import IP4, IP6, H, I, check_bytes
class DNSError(Exception):
    """raised for any dns parse/encode failure in this module"""
    pass
def unknown_qtype(name, key, forward):
    """Bimap fallback for qtypes without a named entry:
    forward maps code -> "TYPE<code>", reverse maps "TYPE<code>" -> code
    """
    if not forward:
        if key.startswith("TYPE"):
            try:
                return int(key[4:])
            except:
                pass
        raise DNSError("%s: Invalid reverse lookup: [%s]" % (name, key))

    try:
        return "TYPE%d" % (key,)
    except:
        raise DNSError("%s: Invalid forward lookup: [%s]" % (name, key))
# record types; unknown codes fall through to TYPE<n> via unknown_qtype
QTYPE = Bimap(
    "QTYPE",
    {1: "A", 12: "PTR", 16: "TXT", 28: "AAAA", 33: "SRV", 47: "NSEC", 255: "ANY"},
    unknown_qtype,
)

# record classes; 0x8001 is IN with the mdns cache-flush bit set
CLASS = Bimap("CLASS", {1: "IN", 254: "None", 255: "*", 0x8001: "F_IN"}, DNSError)

# header qr flag
QR = Bimap("QR", {0: "QUERY", 1: "RESPONSE"}, DNSError)

# response codes
RCODE = Bimap(
    "RCODE",
    {
        0: "NOERROR",
        1: "FORMERR",
        2: "SERVFAIL",
        3: "NXDOMAIN",
        4: "NOTIMP",
        5: "REFUSED",
        6: "YXDOMAIN",
        7: "YXRRSET",
        8: "NXRRSET",
        9: "NOTAUTH",
        10: "NOTZONE",
    },
    DNSError,
)

# header opcodes
OPCODE = Bimap(
    "OPCODE", {0: "QUERY", 1: "IQUERY", 2: "STATUS", 4: "NOTIFY", 5: "UPDATE"}, DNSError
)
def label(label, origin=None):
    """build a DNSLabel; absolute names (trailing dot) are used as-is,
    relative names are appended to `origin`
    """
    if label.endswith("."):
        return DNSLabel(label)
    parent = origin if isinstance(origin, DNSLabel) else DNSLabel(origin)
    return parent.add(label)
class DNSRecord(object):
    """a complete dns message: header + question/answer/authority/additional
    sections; can be parsed from or packed to wire format
    """

    @classmethod
    def parse(cls, packet) -> "DNSRecord":
        """decode a wire-format packet into a DNSRecord; raises DNSError"""
        buffer = DNSBuffer(packet)
        try:
            header = DNSHeader.parse(buffer)
            questions = []
            rr = []
            auth = []
            ar = []
            # section counts come from the header
            for i in range(header.q):
                questions.append(DNSQuestion.parse(buffer))
            for i in range(header.a):
                rr.append(RR.parse(buffer))
            for i in range(header.auth):
                auth.append(RR.parse(buffer))
            for i in range(header.ar):
                ar.append(RR.parse(buffer))
            return cls(header, questions, rr, auth=auth, ar=ar)
        except (BufferError, BimapError) as e:
            raise DNSError(
                "Error unpacking DNSRecord [offset=%d]: %s" % (buffer.offset, e)
            )

    @classmethod
    def question(cls, qname, qtype="A", qclass="IN"):
        """convenience constructor for a single-question query"""
        return DNSRecord(
            q=DNSQuestion(qname, getattr(QTYPE, qtype), getattr(CLASS, qclass))
        )

    def __init__(
        self, header=None, questions=None, rr=None, q=None, a=None, auth=None, ar=None
    ) -> None:
        # `q`/`a` are single-item shortcuts appended to questions/rr
        self.header = header or DNSHeader()
        self.questions: list[DNSQuestion] = questions or []
        self.rr: list[RR] = rr or []
        self.auth: list[RR] = auth or []
        self.ar: list[RR] = ar or []
        if q:
            self.questions.append(q)
        if a:
            self.rr.append(a)
        self.set_header_qa()

    def reply(self, ra=1, aa=1):
        """start a response: same id/bitmap with qr set, echoing the question"""
        return DNSRecord(
            DNSHeader(id=self.header.id, bitmap=self.header.bitmap, qr=1, ra=ra, aa=aa),
            q=self.q,
        )

    def add_question(self, *q) -> None:
        self.questions.extend(q)
        self.set_header_qa()

    def add_answer(self, *rr) -> None:
        self.rr.extend(rr)
        self.set_header_qa()

    def add_auth(self, *auth) -> None:
        self.auth.extend(auth)
        self.set_header_qa()

    def add_ar(self, *ar) -> None:
        self.ar.extend(ar)
        self.set_header_qa()

    def set_header_qa(self) -> None:
        # keep the header's section counts in sync with the lists
        self.header.q = len(self.questions)
        self.header.a = len(self.rr)
        self.header.auth = len(self.auth)
        self.header.ar = len(self.ar)

    def get_q(self):
        # first question (or an empty one)
        return self.questions[0] if self.questions else DNSQuestion()

    q = property(get_q)

    def get_a(self):
        # first answer (or an empty RR)
        return self.rr[0] if self.rr else RR()

    a = property(get_a)

    def pack(self) -> bytes:
        """encode the whole message to wire format"""
        self.set_header_qa()
        buffer = DNSBuffer()
        self.header.pack(buffer)
        for q in self.questions:
            q.pack(buffer)
        for rr in self.rr:
            rr.pack(buffer)
        for auth in self.auth:
            auth.pack(buffer)
        for ar in self.ar:
            ar.pack(buffer)
        return buffer.data

    def truncate(self):
        """empty response with the TC (truncated) flag set"""
        return DNSRecord(DNSHeader(id=self.header.id, bitmap=self.header.bitmap, tc=1))

    def format(self, prefix="", sort=False):
        """multi-line text dump of every section"""
        s = sorted if sort else lambda x: x
        sections = [repr(self.header)]
        sections.extend(s([repr(q) for q in self.questions]))
        sections.extend(s([repr(rr) for rr in self.rr]))
        sections.extend(s([repr(rr) for rr in self.auth]))
        sections.extend(s([repr(rr) for rr in self.ar]))
        return prefix + ("\n" + prefix).join(sections)

    short = format

    def __repr__(self):
        return self.format()

    __str__ = __repr__
class DNSHeader(object):
    """dns message header: id, flags bitmap, and the four section counts;
    individual flags are exposed as properties over `bitmap`
    """

    # range-checked uint16 descriptors (see ranges.H)
    id = H("id")
    bitmap = H("bitmap")
    q = H("q")
    a = H("a")
    auth = H("auth")
    ar = H("ar")

    @classmethod
    def parse(cls, buffer):
        """decode the fixed 12-byte header from the buffer"""
        try:
            (id, bitmap, q, a, auth, ar) = buffer.unpack("!HHHHHH")
            return cls(id, bitmap, q, a, auth, ar)
        except (BufferError, BimapError) as e:
            raise DNSError(
                "Error unpacking DNSHeader [offset=%d]: %s" % (buffer.offset, e)
            )

    def __init__(self, id=None, bitmap=None, q=0, a=0, auth=0, ar=0, **args) -> None:
        self.id = id if id else 0
        if bitmap is None:
            self.bitmap = 0
        else:
            self.bitmap = bitmap
        self.q = q
        self.a = a
        self.auth = auth
        self.ar = ar
        # remaining kwargs set individual flags through the properties below
        for k, v in args.items():
            if k.lower() == "qr":
                self.qr = v
            elif k.lower() == "opcode":
                self.opcode = v
            elif k.lower() == "aa":
                self.aa = v
            elif k.lower() == "tc":
                self.tc = v
            elif k.lower() == "rd":
                self.rd = v
            elif k.lower() == "ra":
                self.ra = v
            elif k.lower() == "z":
                self.z = v
            elif k.lower() == "ad":
                self.ad = v
            elif k.lower() == "cd":
                self.cd = v
            elif k.lower() == "rcode":
                self.rcode = v

    # each flag is a bitfield inside `bitmap`; offsets follow RFC 1035

    def get_qr(self):
        return get_bits(self.bitmap, 15)

    def set_qr(self, val):
        self.bitmap = set_bits(self.bitmap, val, 15)

    qr = property(get_qr, set_qr)

    def get_opcode(self):
        return get_bits(self.bitmap, 11, 4)

    def set_opcode(self, val):
        self.bitmap = set_bits(self.bitmap, val, 11, 4)

    opcode = property(get_opcode, set_opcode)

    def get_aa(self):
        return get_bits(self.bitmap, 10)

    def set_aa(self, val):
        self.bitmap = set_bits(self.bitmap, val, 10)

    aa = property(get_aa, set_aa)

    def get_tc(self):
        return get_bits(self.bitmap, 9)

    def set_tc(self, val):
        self.bitmap = set_bits(self.bitmap, val, 9)

    tc = property(get_tc, set_tc)

    def get_rd(self):
        return get_bits(self.bitmap, 8)

    def set_rd(self, val):
        self.bitmap = set_bits(self.bitmap, val, 8)

    rd = property(get_rd, set_rd)

    def get_ra(self):
        return get_bits(self.bitmap, 7)

    def set_ra(self, val):
        self.bitmap = set_bits(self.bitmap, val, 7)

    ra = property(get_ra, set_ra)

    def get_z(self):
        return get_bits(self.bitmap, 6)

    def set_z(self, val):
        self.bitmap = set_bits(self.bitmap, val, 6)

    z = property(get_z, set_z)

    def get_ad(self):
        return get_bits(self.bitmap, 5)

    def set_ad(self, val):
        self.bitmap = set_bits(self.bitmap, val, 5)

    ad = property(get_ad, set_ad)

    def get_cd(self):
        return get_bits(self.bitmap, 4)

    def set_cd(self, val):
        self.bitmap = set_bits(self.bitmap, val, 4)

    cd = property(get_cd, set_cd)

    def get_rcode(self):
        return get_bits(self.bitmap, 0, 4)

    def set_rcode(self, val):
        self.bitmap = set_bits(self.bitmap, val, 0, 4)

    rcode = property(get_rcode, set_rcode)

    def pack(self, buffer):
        """append the 12-byte wire-format header to the buffer"""
        buffer.pack("!HHHHHH", self.id, self.bitmap, self.q, self.a, self.auth, self.ar)

    def __repr__(self):
        f = [
            self.aa and "AA",
            self.tc and "TC",
            self.rd and "RD",
            self.ra and "RA",
            self.z and "Z",
            self.ad and "AD",
            self.cd and "CD",
        ]
        # UPDATE messages relabel the sections (RFC 2136 terminology)
        if OPCODE.get(self.opcode) == "UPDATE":
            f1 = "zo"
            f2 = "pr"
            f3 = "up"
            f4 = "ad"
        else:
            f1 = "q"
            f2 = "a"
            f3 = "ns"
            f4 = "ar"
        return (
            "<DNS Header: id=0x%x type=%s opcode=%s flags=%s "
            "rcode='%s' %s=%d %s=%d %s=%d %s=%d>"
            % (
                self.id,
                QR.get(self.qr),
                OPCODE.get(self.opcode),
                ",".join(filter(None, f)),
                RCODE.get(self.rcode),
                f1,
                self.q,
                f2,
                self.a,
                f3,
                self.auth,
                f4,
                self.ar,
            )
        )

    __str__ = __repr__
class DNSQuestion(object):
    """a single entry of the question section: (qname, qtype, qclass)"""

    @classmethod
    def parse(cls, buffer):
        """decode one question from the buffer; raises DNSError"""
        try:
            qname = buffer.decode_name()
            qtype, qclass = buffer.unpack("!HH")
            return cls(qname, qtype, qclass)
        except (BufferError, BimapError) as e:
            raise DNSError(
                "Error unpacking DNSQuestion [offset=%d]: %s" % (buffer.offset, e)
            )

    def __init__(self, qname=None, qtype=1, qclass=1) -> None:
        self.qname = qname
        self.qtype = qtype
        self.qclass = qclass

    def set_qname(self, qname):
        # coerce anything assigned to qname into a DNSLabel
        if isinstance(qname, DNSLabel):
            self._qname = qname
        else:
            self._qname = DNSLabel(qname)

    def get_qname(self):
        return self._qname

    qname = property(get_qname, set_qname)

    def pack(self, buffer):
        """append the wire-format question to the buffer"""
        buffer.encode_name(self.qname)
        buffer.pack("!HH", self.qtype, self.qclass)

    def __repr__(self):
        return "<DNS Question: '%s' qtype=%s qclass=%s>" % (
            self.qname,
            QTYPE.get(self.qtype),
            CLASS.get(self.qclass),
        )

    __str__ = __repr__
class RR(object):
    """a resource record: name, type, class, ttl and typed rdata"""

    # range-checked integer descriptors (uint16 / uint32)
    rtype = H("rtype")
    rclass = H("rclass")
    ttl = I("ttl")
    rdlength = H("rdlength")

    @classmethod
    def parse(cls, buffer):
        """decode one RR; rdata is parsed by the class mapped in RDMAP"""
        try:
            rname = buffer.decode_name()
            rtype, rclass, ttl, rdlength = buffer.unpack("!HHIH")
            if rdlength:
                rdata = RDMAP.get(QTYPE.get(rtype), RD).parse(buffer, rdlength)
            else:
                rdata = ""
            return cls(rname, rtype, rclass, ttl, rdata)
        except (BufferError, BimapError) as e:
            raise DNSError("Error unpacking RR [offset=%d]: %s" % (buffer.offset, e))

    def __init__(self, rname=None, rtype=1, rclass=1, ttl=0, rdata=None) -> None:
        self.rname = rname
        self.rtype = rtype
        self.rclass = rclass
        self.ttl = ttl
        self.rdata = rdata

    def set_rname(self, rname):
        # coerce anything assigned to rname into a DNSLabel
        if isinstance(rname, DNSLabel):
            self._rname = rname
        else:
            self._rname = DNSLabel(rname)

    def get_rname(self):
        return self._rname

    rname = property(get_rname, set_rname)

    def pack(self, buffer):
        """append the wire-format RR; rdlength is backpatched after
        the rdata has been written (its size isn't known up front)
        """
        buffer.encode_name(self.rname)
        buffer.pack("!HHI", self.rtype, self.rclass, self.ttl)
        rdlength_ptr = buffer.offset
        buffer.pack("!H", 0)
        start = buffer.offset
        self.rdata.pack(buffer)
        end = buffer.offset
        buffer.update(rdlength_ptr, "!H", end - start)

    def __repr__(self):
        return "<DNS RR: '%s' rtype=%s rclass=%s ttl=%d rdata='%s'>" % (
            self.rname,
            QTYPE.get(self.rtype),
            CLASS.get(self.rclass),
            self.ttl,
            self.rdata,
        )

    __str__ = __repr__
class RD(object):
    """Generic/opaque rdata: an uninterpreted byte string."""

    @classmethod
    def parse(cls, buffer, length):
        """Read *length* raw bytes from *buffer*."""
        try:
            return cls(buffer.get(length))
        except (BufferError, BimapError) as e:
            raise DNSError("Error unpacking RD [offset=%d]: %s" % (buffer.offset, e))

    def __init__(self, data=b"") -> None:
        check_bytes("data", data)
        self.data = bytes(data)

    def pack(self, buffer):
        buffer.append(self.data)

    def __repr__(self):
        # generic rdata presentation format: \# <len> <hex>
        if self.data:
            hexed = binascii.hexlify(self.data).decode().upper()
            return "\\# %d %s" % (len(self.data), hexed)
        return "\\# 0"

    attrs = ("data",)
def _force_bytes(x):
if isinstance(x, bytes):
return x
else:
return x.encode()
class TXT(RD):
    """TXT rdata: a sequence of byte strings, each at most 255 bytes long."""

    @classmethod
    def parse(cls, buffer, length):
        """Read length-prefixed strings until *length* bytes are consumed."""
        try:
            strings = []
            end = buffer.offset + length
            consumed = 0
            while buffer.offset < end:
                (slen,) = buffer.unpack("!B")
                # a string may not run past the rdata boundary
                if consumed + slen >= length:
                    raise DNSError(
                        "Invalid TXT record: len(%d) > RD len(%d)" % (slen, length)
                    )
                consumed += slen
                strings.append(buffer.get(slen))
            return cls(strings)
        except (BufferError, BimapError) as e:
            raise DNSError("Error unpacking TXT [offset=%d]: %s" % (buffer.offset, e))

    def __init__(self, data) -> None:
        items = data if type(data) in (tuple, list) else [data]
        self.data = [_force_bytes(x) for x in items]
        for chunk in self.data:
            if len(chunk) > 255:
                raise DNSError("TXT record too long: %s" % self.data)

    def pack(self, buffer):
        for chunk in self.data:
            if len(chunk) > 255:
                raise DNSError("TXT record too long: %s" % chunk)
            buffer.pack("!B", len(chunk))
            buffer.append(chunk)

    def __repr__(self):
        return ",".join(repr(s) for s in self.data)
class A(RD):
    """A rdata: an IPv4 address held as a 4-tuple of octets."""

    # validated as a 4-tuple of ints in 0-255 by the IP4 descriptor
    data = IP4("data")

    @classmethod
    def parse(cls, buffer, length):
        try:
            return cls(buffer.unpack("!BBBB"))
        except (BufferError, BimapError) as e:
            raise DNSError("Error unpacking A [offset=%d]: %s" % (buffer.offset, e))

    def __init__(self, data) -> None:
        if type(data) in (tuple, list):
            self.data = tuple(data)
        else:
            # dotted-quad string, e.g. "192.0.2.1"
            self.data = tuple(int(p) for p in data.rstrip(".").split("."))

    def pack(self, buffer):
        buffer.pack("!BBBB", *self.data)

    def __repr__(self):
        return "%d.%d.%d.%d" % self.data
def _parse_ipv6(a):
l, _, r = a.partition("::")
l_groups = list(chain(*[divmod(int(x, 16), 256) for x in l.split(":") if x]))
r_groups = list(chain(*[divmod(int(x, 16), 256) for x in r.split(":") if x]))
zeros = [0] * (16 - len(l_groups) - len(r_groups))
return tuple(l_groups + zeros + r_groups)
def _format_ipv6(a):
left = []
right = []
current = "left"
for i in range(0, 16, 2):
group = (a[i] << 8) + a[i + 1]
if current == "left":
if group == 0 and i < 14:
if (a[i + 2] << 8) + a[i + 3] == 0:
current = "right"
else:
left.append("0")
else:
left.append("%x" % group)
else:
if group == 0 and len(right) == 0:
pass
else:
right.append("%x" % group)
if len(left) < 8:
return ":".join(left) + "::" + ":".join(right)
else:
return ":".join(left)
class AAAA(RD):
    """AAAA rdata: an IPv6 address held as a 16-tuple of octets."""

    # validated as a 16-tuple of ints in 0-255 by the IP6 descriptor
    data = IP6("data")

    @classmethod
    def parse(cls, buffer, length):
        try:
            return cls(buffer.unpack("!16B"))
        except (BufferError, BimapError) as e:
            raise DNSError("Error unpacking AAAA [offset=%d]: %s" % (buffer.offset, e))

    def __init__(self, data) -> None:
        if type(data) in (tuple, list):
            self.data = tuple(data)
        else:
            # presentation-format string, e.g. "fe80::1"
            self.data = _parse_ipv6(data)

    def pack(self, buffer):
        buffer.pack("!16B", *self.data)

    def __repr__(self):
        return _format_ipv6(self.data)
class CNAME(RD):
    """CNAME rdata: a single domain name."""

    @classmethod
    def parse(cls, buffer, length):
        try:
            return cls(buffer.decode_name())
        except (BufferError, BimapError) as e:
            raise DNSError("Error unpacking CNAME [offset=%d]: %s" % (buffer.offset, e))

    def __init__(self, label=None) -> None:
        self.label = label

    def set_label(self, label):
        # coerce any name-like value to a DNSLabel
        self._label = label if isinstance(label, DNSLabel) else DNSLabel(label)

    def get_label(self):
        return self._label

    label = property(get_label, set_label)

    def pack(self, buffer):
        buffer.encode_name(self.label)

    def __repr__(self):
        return "%s" % (self.label)

    attrs = ("label",)
class PTR(CNAME):
    """PTR rdata: identical wire format to CNAME (a single domain name)."""

    pass
class SRV(RD):
    """SRV rdata: priority, weight, port and a target name."""

    # 16-bit range-checked attributes
    priority = H("priority")
    weight = H("weight")
    port = H("port")

    @classmethod
    def parse(cls, buffer, length):
        try:
            prio, wt, prt = buffer.unpack("!HHH")
            tgt = buffer.decode_name()
            return cls(prio, wt, prt, tgt)
        except (BufferError, BimapError) as e:
            raise DNSError("Error unpacking SRV [offset=%d]: %s" % (buffer.offset, e))

    def __init__(self, priority=0, weight=0, port=0, target=None) -> None:
        self.priority = priority
        self.weight = weight
        self.port = port
        self.target = target

    def set_target(self, target):
        # coerce any name-like value to a DNSLabel
        self._target = target if isinstance(target, DNSLabel) else DNSLabel(target)

    def get_target(self):
        return self._target

    target = property(get_target, set_target)

    def pack(self, buffer):
        buffer.pack("!HHH", self.priority, self.weight, self.port)
        buffer.encode_name(self.target)

    def __repr__(self):
        return "%d %d %d %s" % (self.priority, self.weight, self.port, self.target)

    attrs = ("priority", "weight", "port", "target")
def decode_type_bitmap(type_bitmap):
    """Expand an NSEC type-bitmap into a list of QTYPE mnemonic names.

    The bitmap is a sequence of (window number, length, octets) runs;
    bit i of octet p in window w represents type 256*w + 8*p + i.
    """
    names = []
    buf = DNSBuffer(type_bitmap)
    while buf.remaining():
        window, blen = buf.unpack("BB")
        octets = bytearray(buf.get(blen))
        for idx, octet in enumerate(octets):
            for bit in range(8):
                if (octet << bit) & 0x80:
                    names.append(QTYPE[256 * window + 8 * idx + bit])
    return names
def encode_type_bitmap(rrlist):
    """Encode a list of QTYPE mnemonic names into an NSEC type-bitmap.

    Types are grouped into 256-wide windows; each window is emitted as
    (window number, octet count, octets) with trailing zero octets trimmed.
    """
    # bugfix: an empty rrlist used to raise IndexError on rrlist[0];
    # encode it as an empty bitmap (zero windows) instead
    if not rrlist:
        return DNSBuffer().data
    rrlist = sorted([getattr(QTYPE, rr) for rr in rrlist])
    buf = DNSBuffer()
    curWindow = rrlist[0] // 256
    bitmap = bytearray(32)
    n = len(rrlist) - 1
    for i, rr in enumerate(rrlist):
        v = rr - curWindow * 256
        bitmap[v // 8] |= 1 << (7 - v % 8)  # set the bit for this type
        if i == n or rrlist[i + 1] >= (curWindow + 1) * 256:
            # window complete: trim trailing zero octets and emit it
            while bitmap[-1] == 0:
                bitmap = bitmap[:-1]
            buf.pack("BB", curWindow, len(bitmap))
            buf.append(bitmap)
            if i != n:
                curWindow = rrlist[i + 1] // 256
                bitmap = bytearray(32)
    return buf.data
class NSEC(RD):
    """NSEC rdata: the next owner name plus a type bitmap."""

    @classmethod
    def parse(cls, buffer, length):
        try:
            stop = buffer.offset + length
            owner = buffer.decode_name()
            # whatever remains of the rdata after the name is the bitmap
            types = decode_type_bitmap(buffer.get(stop - buffer.offset))
            return cls(owner, types)
        except (BufferError, BimapError) as e:
            raise DNSError("Error unpacking NSEC [offset=%d]: %s" % (buffer.offset, e))

    def __init__(self, label, rrlist) -> None:
        self.label = label
        self.rrlist = rrlist

    def set_label(self, label):
        # coerce any name-like value to a DNSLabel
        self._label = label if isinstance(label, DNSLabel) else DNSLabel(label)

    def get_label(self):
        return self._label

    label = property(get_label, set_label)

    def pack(self, buffer):
        buffer.encode_name(self.label)
        buffer.append(encode_type_bitmap(self.rrlist))

    def __repr__(self):
        return "%s %s" % (self.label, " ".join(self.rrlist))

    attrs = ("label", "rrlist")
# rtype name -> rdata parser class; unknown types fall back to opaque RD
RDMAP = {"A": A, "AAAA": AAAA, "TXT": TXT, "PTR": PTR, "SRV": SRV, "NSEC": NSEC}

View File

@@ -0,0 +1,154 @@
# coding: utf-8
from __future__ import print_function
import re
from .bit import get_bits, set_bits
from .buffer import Buffer, BufferError
# byte values 33..126: printable ASCII that may appear verbatim in a label
LDH = set(range(33, 127))
# three-digit decimal escape in presentation format, e.g. "\065"
ESCAPE = re.compile(r"\\([0-9][0-9][0-9])")
class DNSLabelError(Exception):
    """Raised when a domain name or label violates DNS length limits."""

    pass
class DNSLabel(object):
    """A domain name held as a tuple of byte-string labels (no trailing dot).

    Accepts another DNSLabel, a list/tuple of byte labels, or a text/bytes
    name in presentation format.  Equality and hashing are case-insensitive.
    The py2/py3 branches below key off whether str and bytes are the same
    type, since this file is also run (hint-stripped) under py2.
    """

    def __init__(self, label):
        if type(label) == DNSLabel:
            self.label = label.label
        elif type(label) in (list, tuple):
            self.label = tuple(label)
        else:
            if not label or label in (b".", "."):
                self.label = ()
            elif type(label) is not bytes:
                if type("") != type(b""):
                    # py3 text: expand \ooo escapes before IDNA-encoding
                    label = ESCAPE.sub(lambda m: chr(int(m[1])), label)
                self.label = tuple(label.encode("idna").rstrip(b".").split(b"."))
            else:
                if type("") == type(b""):
                    # py2 str (== bytes): expand \ooo escapes in place
                    label = ESCAPE.sub(lambda m: chr(int(m.groups()[0])), label)
                self.label = tuple(label.rstrip(b".").split(b"."))

    def add(self, name):
        """Return a new DNSLabel with *name* prepended to this name."""
        new = DNSLabel(name)
        if self.label:
            new.label += self.label
        return new

    def idna(self):
        # presentation form via IDNA decoding, always with a trailing dot
        return ".".join([s.decode("idna") for s in self.label]) + "."

    def _decode(self, s):
        # printable ASCII passes through; anything else becomes a \ooo escape
        if set(s).issubset(LDH):
            return s.decode()
        else:
            return "".join([(chr(c) if (c in LDH) else "\\%03d" % c) for c in s])

    def __str__(self):
        return ".".join([self._decode(bytearray(s)) for s in self.label]) + "."

    def __repr__(self):
        return "<DNSLabel: '%s'>" % str(self)

    def __hash__(self):
        # lowercased so it stays consistent with the case-insensitive __eq__
        return hash(tuple(map(lambda x: x.lower(), self.label)))

    def __ne__(self, other):
        return not self == other

    def __eq__(self, other):
        if type(other) != DNSLabel:
            return self.__eq__(DNSLabel(other))
        else:
            return [l.lower() for l in self.label] == [l.lower() for l in other.label]

    def __len__(self):
        # total name length with single-byte separators between labels
        return len(b".".join(self.label))
class DNSBuffer(Buffer):
    """Buffer subclass that adds DNS domain-name encoding/decoding,
    including message-compression pointers."""

    def __init__(self, data=b""):
        super(DNSBuffer, self).__init__(data)
        # name-suffix tuple -> offset of its first occurrence, used to
        # emit compression pointers in encode_name
        self.names = {}

    def decode_name(self, last=-1):
        """Decode a (possibly compressed) domain name at the current offset.

        *last* is the offset this call was entered from, used to detect
        pointer loops; raises BufferError on recursive or forward pointers
        and on labels that are not valid text.
        """
        label = []
        done = False
        while not done:
            (length,) = self.unpack("!B")
            if get_bits(length, 6, 2) == 3:
                # top two bits set: a 14-bit compression pointer follows
                self.offset -= 1
                pointer = get_bits(self.unpack("!H")[0], 0, 14)
                save = self.offset
                if last == save:
                    raise BufferError(
                        "Recursive pointer in DNSLabel [offset=%d,pointer=%d,length=%d]"
                        % (self.offset, pointer, len(self.data))
                    )
                if pointer < self.offset:
                    # pointers may only refer backwards in the message
                    self.offset = pointer
                else:
                    raise BufferError(
                        "Invalid pointer in DNSLabel [offset=%d,pointer=%d,length=%d]"
                        % (self.offset, pointer, len(self.data))
                    )
                label.extend(self.decode_name(save).label)
                self.offset = save
                done = True
            else:
                if length > 0:
                    l = self.get(length)
                    try:
                        l.decode()
                    except UnicodeDecodeError:
                        raise BufferError("Invalid label <%s>" % l)
                    label.append(l)
                else:
                    # zero-length label terminates the name
                    done = True
        return DNSLabel(label)

    def encode_name(self, name):
        """Encode *name* with compression: any suffix already written to
        this buffer is replaced by a 2-byte pointer to it."""
        if not isinstance(name, DNSLabel):
            name = DNSLabel(name)
        if len(name) > 253:
            raise DNSLabelError("Domain label too long: %r" % name)
        name = list(name.label)
        while name:
            if tuple(name) in self.names:
                # suffix seen before: emit pointer (top two bits set) and stop
                pointer = self.names[tuple(name)]
                pointer = set_bits(pointer, 3, 14, 2)
                self.pack("!H", pointer)
                return
            else:
                # remember where this suffix starts for later compression
                self.names[tuple(name)] = self.offset
                element = name.pop(0)
                if len(element) > 63:
                    raise DNSLabelError("Label component too long: %r" % element)
                self.pack("!B", len(element))
                self.append(element)
        self.append(b"\x00")

    def encode_name_nocompress(self, name):
        """Encode *name* without using compression pointers."""
        if not isinstance(name, DNSLabel):
            name = DNSLabel(name)
        if len(name) > 253:
            raise DNSLabelError("Domain label too long: %r" % name)
        name = list(name.label)
        while name:
            element = name.pop(0)
            if len(element) > 63:
                raise DNSLabelError("Label component too long: %r" % element)
            self.pack("!B", len(element))
            self.append(element)
        self.append(b"\x00")

View File

@@ -0,0 +1,105 @@
# coding: utf-8
from __future__ import print_function
import collections
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
class Lexer(object):
    """Minimal streaming lexer base class.

    Wraps a file-like object (or str/bytes) and provides single-character
    read/peek with pushback plus C-style escape decoding.  Subclasses drive
    tokenization by swapping self.state between bound state methods; each
    state returns (token-or-None, next-state).  The base lexStart state
    terminates immediately, so the base class yields no tokens.
    """

    escape_chars = "\\"
    escape = {"n": "\n", "t": "\t", "r": "\r"}

    def __init__(self, f, debug=False):
        if hasattr(f, "read"):
            self.f = f
        elif type(f) == str:
            self.f = StringIO(f)
        elif type(f) == bytes:
            self.f = StringIO(f.decode())
        else:
            raise ValueError("Invalid input")
        self.debug = debug
        self.q = collections.deque()  # pushback queue, drained before self.f
        self.state = self.lexStart  # current state function
        self.escaped = False  # whether the last readescaped() saw an escape
        self.eof = False

    def __iter__(self):
        return self.parse()

    def next_token(self):
        """Run the current state and advance to the one it returns."""
        if self.debug:
            print("STATE", self.state)
        (tok, self.state) = self.state()
        return tok

    def parse(self):
        """Yield tokens until the state machine stops or input runs out."""
        while self.state is not None and not self.eof:
            tok = self.next_token()
            if tok:
                yield tok

    def read(self, n=1):
        """Read up to *n* chars, consuming the pushback queue first."""
        s = ""
        while self.q and n > 0:
            s += self.q.popleft()
            n -= 1
        s += self.f.read(n)
        if s == "":
            self.eof = True
        if self.debug:
            print("Read: >%s<" % repr(s))
        return s

    def peek(self, n=1):
        """Return up to *n* upcoming chars without consuming them."""
        s = ""
        i = 0
        while len(self.q) > i and n > 0:
            s += self.q[i]
            i += 1
            n -= 1
        r = self.f.read(n)
        if n > 0 and r == "":
            self.eof = True
        # keep what was read from f so a later read() still sees it
        self.q.extend(r)
        if self.debug:
            print("Peek : >%s<" % repr(s + r))
        return s + r

    def pushback(self, s):
        """Push *s* back so it is read again before anything else."""
        p = collections.deque(s)
        p.extend(self.q)
        self.q = p

    def readescaped(self):
        r"""Read one char, decoding backslash escapes (\n, \ooo octal, \xhh).

        Sets self.escaped so callers can tell a literal char from an
        escaped one.  Returns "" at end of input.
        """
        c = self.read(1)
        if c in self.escape_chars:
            self.escaped = True
            n = self.peek(3)
            if n.isdigit():
                n = self.read(3)
                if self.debug:
                    print("Escape: >%s<" % n)
                return chr(int(n, 8))
            elif n[:1] == "x":
                # bugfix: n[0] raised IndexError when EOF immediately
                # followed the backslash; n[:1] is "" there and falls
                # through to the harmless else branch instead
                x = self.read(3)
                if self.debug:
                    print("Escape: >%s<" % x)
                return chr(int(x[1:], 16))
            else:
                c = self.read(1)
                if self.debug:
                    print("Escape: >%s<" % c)
                return self.escape.get(c, c)
        else:
            self.escaped = False
            return c

    def lexStart(self):
        # terminal state: no token, no next state
        return (None, None)

View File

@@ -0,0 +1,81 @@
# coding: utf-8
import sys
if sys.version_info < (3,):
    # py2: plain ints can overflow into `long`, and str is the byte type
    int_types = (
        int,
        long,  # noqa: F821 (py2-only builtin)
    )
    byte_types = (str, bytearray)
else:
    int_types = (int,)
    byte_types = (bytes, bytearray)


def check_instance(name, val, types):
    """Raise ValueError unless *val* is an instance of *types*."""
    if isinstance(val, types):
        return
    raise ValueError(
        "Attribute '%s' must be instance of %s [%s]" % (name, types, type(val))
    )


def check_bytes(name, val):
    """Raise ValueError unless *val* is a byte type."""
    return check_instance(name, val, byte_types)


def range_property(attr, min, max):
    """Build a property storing into _<attr> that only accepts ints
    within [min, max]."""
    key = "_%s" % attr

    def getter(obj):
        return getattr(obj, key)

    def setter(obj, val):
        if not (isinstance(val, int_types) and min <= val <= max):
            raise ValueError(
                "Attribute '%s' must be between %d-%d [%s]" % (attr, min, max, val)
            )
        setattr(obj, key, val)

    return property(getter, setter)


def B(attr):
    """Unsigned 8-bit attribute."""
    return range_property(attr, 0, 255)


def H(attr):
    """Unsigned 16-bit attribute."""
    return range_property(attr, 0, 65535)


def I(attr):
    """Unsigned 32-bit attribute."""
    return range_property(attr, 0, 4294967295)


def ntuple_range(attr, n, min, max):
    """Build a property that only accepts *n*-tuples of ints in [min, max]."""
    key = "_%s" % attr
    ok = lambda x: isinstance(x, int_types) and min <= x <= max

    def getter(obj):
        return getattr(obj, key)

    def setter(obj, val):
        if len(val) != n:
            raise ValueError(
                "Attribute '%s' must be tuple with %d elements [%s]" % (attr, n, val)
            )
        if not all(map(ok, val)):
            raise ValueError(
                "Attribute '%s' elements must be between %d-%d [%s]"
                % (attr, min, max, val)
            )
        setattr(obj, key, val)

    return property(getter, setter)


def IP4(attr):
    """IPv4 address attribute: a 4-tuple of octets."""
    return ntuple_range(attr, 4, 0, 255)


def IP6(attr):
    """IPv6 address attribute: a 16-tuple of octets."""
    return ntuple_range(attr, 16, 0, 255)

View File

@@ -0,0 +1,5 @@
`ifaddr` with py2.7 support; make-sfx.sh enables it by stripping the py3 type hints (using strip_hints) and removing the `^if True:` blocks
L: BSD-2-Clause
Copyright (c) 2014 Stefan C. Mueller
https://github.com/pydron/ifaddr/

View File

@@ -0,0 +1,21 @@
# coding: utf-8
from __future__ import print_function, unicode_literals
"""
L: BSD-2-Clause
Copyright (c) 2014 Stefan C. Mueller
https://github.com/pydron/ifaddr/tree/0.2.0
"""
import os
from ._shared import IP, Adapter
# select the platform-specific get_adapters implementation at import time
if os.name == "nt":
    from ._win32 import get_adapters
elif os.name == "posix":
    from ._posix import get_adapters
else:
    raise RuntimeError("Unsupported Operating System: %s" % os.name)
__all__ = ["Adapter", "IP", "get_adapters"]

View File

@@ -0,0 +1,84 @@
# coding: utf-8
from __future__ import print_function, unicode_literals
import collections
import ctypes.util
import os
import socket
import ipaddress
if True: # pylint: disable=using-constant-test
from typing import Iterable, Optional
from . import _shared as shared
from ._shared import U
class ifaddrs(ctypes.Structure):
    # mirrors the C `struct ifaddrs` from <ifaddrs.h> (only the fields read here)
    pass


# _fields_ is assigned after the class statement because the struct is
# self-referential: ifa_next points at the next ifaddrs in the linked list
ifaddrs._fields_ = [
    ("ifa_next", ctypes.POINTER(ifaddrs)),
    ("ifa_name", ctypes.c_char_p),
    ("ifa_flags", ctypes.c_uint),
    ("ifa_addr", ctypes.POINTER(shared.sockaddr)),
    ("ifa_netmask", ctypes.POINTER(shared.sockaddr)),
]
# getifaddrs/freeifaddrs live in libsocket on SunOS/Solaris, libc elsewhere
libc = ctypes.CDLL(ctypes.util.find_library("socket" if os.uname()[0] == "SunOS" else "c"), use_errno=True)  # type: ignore
def get_adapters(include_unconfigured: bool = False) -> Iterable[shared.Adapter]:
    """Enumerate network adapters and their configured IPs via getifaddrs(3).

    Each interface name maps to one shared.Adapter carrying all of its IPs;
    with include_unconfigured=True, address-less interfaces are included too.
    Raises OSError if getifaddrs fails.
    """
    addr0 = addr = ctypes.POINTER(ifaddrs)()
    retval = libc.getifaddrs(ctypes.byref(addr))
    if retval != 0:
        eno = ctypes.get_errno()
        raise OSError(eno, os.strerror(eno))
    ips = collections.OrderedDict()

    def add_ip(adapter_name: str, ip: Optional[shared.IP]) -> None:
        # create the Adapter entry on first sight of this interface name
        if adapter_name not in ips:
            index = None  # type: Optional[int]
            try:
                # Mypy errors on this when the Windows CI runs:
                # error: Module has no attribute "if_nametoindex"
                index = socket.if_nametoindex(adapter_name)  # type: ignore
            except (OSError, AttributeError):
                pass
            ips[adapter_name] = shared.Adapter(
                adapter_name, adapter_name, [], index=index
            )
        if ip is not None:
            ips[adapter_name].ips.append(ip)

    # walk the singly-linked list returned by getifaddrs
    while addr:
        name = addr[0].ifa_name.decode(encoding="UTF-8")
        ip_addr = shared.sockaddr_to_ip(addr[0].ifa_addr)
        if ip_addr:
            # NOTE(review): "sa_familiy" (sic) presumably matches the field
            # name declared on shared.sockaddr — confirm against _shared.
            # Some systems leave the netmask's family unset (0), so copy
            # the address family across before decoding the netmask.
            if addr[0].ifa_netmask and not addr[0].ifa_netmask[0].sa_familiy:
                addr[0].ifa_netmask[0].sa_familiy = addr[0].ifa_addr[0].sa_familiy
            netmask = shared.sockaddr_to_ip(addr[0].ifa_netmask)
            if isinstance(netmask, tuple):
                # tuple netmask: IPv6; count the prefix bits
                netmaskStr = U(netmask[0])
                prefixlen = shared.ipv6_prefixlength(ipaddress.IPv6Address(netmaskStr))
            else:
                if netmask is None:
                    t = "sockaddr_to_ip({}) returned None"
                    raise Exception(t.format(addr[0].ifa_netmask))
                # string netmask: IPv4 dotted-quad
                netmaskStr = U("0.0.0.0/" + netmask)
                prefixlen = ipaddress.IPv4Network(netmaskStr).prefixlen
            ip = shared.IP(ip_addr, prefixlen, name)
            add_ip(name, ip)
        else:
            # interface exists but has no address configured
            if include_unconfigured:
                add_ip(name, None)
        addr = addr[0].ifa_next
    libc.freeifaddrs(addr0)
    return ips.values()

Some files were not shown because too many files have changed in this diff Show More