mirror of
https://github.com/9001/copyparty.git
synced 2025-10-26 01:23:54 +00:00
Compare commits
24 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
0265455cd1 | ||
|
|
afafc886a4 | ||
|
|
8a959f6ac4 | ||
|
|
1c3aa0d2c5 | ||
|
|
79b7d3316a | ||
|
|
fa7768583a | ||
|
|
faf49f6c15 | ||
|
|
765af31b83 | ||
|
|
b6a3c52d67 | ||
|
|
b025c2f660 | ||
|
|
e559a7c878 | ||
|
|
5c8855aafd | ||
|
|
b5fc537b89 | ||
|
|
14899d3a7c | ||
|
|
0ea7881652 | ||
|
|
ec29b59d1e | ||
|
|
9405597c15 | ||
|
|
82441978c6 | ||
|
|
e0e6291bdb | ||
|
|
b2b083fd0a | ||
|
|
f8a51b68e7 | ||
|
|
e0a19108e5 | ||
|
|
770ea68ca8 | ||
|
|
ce36c52baf |
4
.vscode/settings.json
vendored
4
.vscode/settings.json
vendored
@@ -37,7 +37,7 @@
|
||||
"python.linting.banditEnabled": true,
|
||||
"python.linting.flake8Args": [
|
||||
"--max-line-length=120",
|
||||
"--ignore=E722,F405,E203,W503,W293",
|
||||
"--ignore=E722,F405,E203,W503,W293,E402",
|
||||
],
|
||||
"python.linting.banditArgs": [
|
||||
"--ignore=B104"
|
||||
@@ -55,6 +55,6 @@
|
||||
//
|
||||
// things you may wanna edit:
|
||||
//
|
||||
"python.pythonPath": ".venv/bin/python",
|
||||
"python.pythonPath": "/usr/bin/python3",
|
||||
//"python.linting.enabled": true,
|
||||
}
|
||||
15
README.md
15
README.md
@@ -38,10 +38,20 @@ turn your phone or raspi into a portable file server with resumable uploads/down
|
||||
* [x] accounts
|
||||
* [x] markdown viewer
|
||||
* [x] markdown editor
|
||||
* [x] FUSE client
|
||||
|
||||
summary: it works! you can use it! (but technically not even close to beta)
|
||||
|
||||
|
||||
# client examples
|
||||
|
||||
* javascript: dump some state into a file (two separate examples)
|
||||
* `await fetch('https://127.0.0.1:3923/', {method:"PUT", body: JSON.stringify(foo)});`
|
||||
* `var xhr = new XMLHttpRequest(); xhr.open('POST', 'https://127.0.0.1:3923/msgs?raw'); xhr.send('foo');`
|
||||
|
||||
* FUSE: mount a copyparty server as a local filesystem (see [./bin/](bin/))
|
||||
|
||||
|
||||
# dependencies
|
||||
|
||||
* `jinja2`
|
||||
@@ -57,10 +67,13 @@ currently there are two self-contained binaries:
|
||||
* `copyparty-sfx.sh` for unix (linux and osx) -- smaller, more robust
|
||||
* `copyparty-sfx.py` for windows (unix too) -- crossplatform, beta
|
||||
|
||||
launch either of them and it'll unpack and run copyparty, assuming you have python installed of course
|
||||
launch either of them (**use sfx.py on systemd**) and it'll unpack and run copyparty, assuming you have python installed of course
|
||||
|
||||
pls note that `copyparty-sfx.sh` will fail if you rename `copyparty-sfx.py` to `copyparty.py` and keep it in the same folder because `sys.path` is funky
|
||||
|
||||
|
||||
## sfx repack
|
||||
|
||||
if you don't need all the features you can repack the sfx and save a bunch of space; all you need is an sfx and a copy of this repo (nothing else to download or build, except for either msys2 or WSL if you're on windows)
|
||||
* `724K` original size as of v0.4.0
|
||||
* `256K` after `./scripts/make-sfx.sh re no-ogv`
|
||||
|
||||
34
bin/README.md
Normal file
34
bin/README.md
Normal file
@@ -0,0 +1,34 @@
|
||||
# copyparty-fuse.py
|
||||
* mount a copyparty server as a local filesystem (read-only)
|
||||
* **supports Windows!** -- expect `194 MiB/s` sequential read
|
||||
* **supports Linux** -- expect `117 MiB/s` sequential read
|
||||
* **supports macos** -- expect `85 MiB/s` sequential read
|
||||
|
||||
filecache is default-on for windows and macos;
|
||||
* macos readsize is 64kB, so speed ~32 MiB/s without the cache
|
||||
* windows readsize varies by software; explorer=1M, pv=32k
|
||||
|
||||
note that copyparty should run with `-ed` to enable dotfiles (hidden otherwise)
|
||||
|
||||
|
||||
## to run this on windows:
|
||||
* install [winfsp](https://github.com/billziss-gh/winfsp/releases/latest) and [python 3](https://www.python.org/downloads/)
|
||||
* [x] add python 3.x to PATH (it asks during install)
|
||||
* `python -m pip install --user fusepy`
|
||||
* `python ./copyparty-fuse.py n: http://192.168.1.69:3923/`
|
||||
|
||||
10% faster in [msys2](https://www.msys2.org/), 700% faster if debug prints are enabled:
|
||||
* `pacman -S mingw64/mingw-w64-x86_64-python{,-pip}`
|
||||
* `/mingw64/bin/python3 -m pip install --user fusepy`
|
||||
* `/mingw64/bin/python3 ./copyparty-fuse.py [...]`
|
||||
|
||||
you could replace winfsp with [dokan](https://github.com/dokan-dev/dokany/releases/latest), let me know if you [figure out how](https://github.com/dokan-dev/dokany/wiki/FUSE)
|
||||
(winfsp's sshfs leaks, doesn't look like winfsp itself does, should be fine)
|
||||
|
||||
|
||||
|
||||
# copyparty-fuse🅱️.py
|
||||
* mount a copyparty server as a local filesystem (read-only)
|
||||
* does the same thing except more correct, `samba` approves
|
||||
* **supports Linux** -- expect `18 MiB/s` (wait what)
|
||||
* **supports Macos** -- probably
|
||||
514
bin/copyparty-fuse.py
Normal file → Executable file
514
bin/copyparty-fuse.py
Normal file → Executable file
@@ -7,47 +7,80 @@ __copyright__ = 2019
|
||||
__license__ = "MIT"
|
||||
__url__ = "https://github.com/9001/copyparty/"
|
||||
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
import stat
|
||||
import errno
|
||||
import struct
|
||||
import threading
|
||||
import http.client # py2: httplib
|
||||
import urllib.parse
|
||||
from datetime import datetime
|
||||
from urllib.parse import quote_from_bytes as quote
|
||||
|
||||
try:
|
||||
from fuse import FUSE, FuseOSError, Operations
|
||||
except:
|
||||
print(
|
||||
"\n could not import fuse; these may help:\n python3 -m pip install --user fusepy\n apt install libfuse\n modprobe fuse"
|
||||
)
|
||||
raise
|
||||
|
||||
|
||||
"""
|
||||
mount a copyparty server (local or remote) as a filesystem
|
||||
|
||||
usage:
|
||||
python copyparty-fuse.py ./music http://192.168.1.69:1234/
|
||||
python copyparty-fuse.py ./music http://192.168.1.69:3923/
|
||||
|
||||
dependencies:
|
||||
sudo apk add fuse-dev
|
||||
python3 -m pip install --user fusepy
|
||||
|
||||
|
||||
MB/s
|
||||
28 cache NOthread
|
||||
24 cache thread
|
||||
29 cache NOthread NOmutex
|
||||
67 NOcache NOthread NOmutex ( ´・ω・) nyoro~n
|
||||
10 NOcache thread NOmutex
|
||||
+ on Linux: sudo apk add fuse
|
||||
+ on Macos: https://osxfuse.github.io/
|
||||
+ on Windows: https://github.com/billziss-gh/winfsp/releases/latest
|
||||
"""
|
||||
|
||||
|
||||
import re
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import stat
|
||||
import errno
|
||||
import struct
|
||||
import builtins
|
||||
import platform
|
||||
import threading
|
||||
import traceback
|
||||
import http.client # py2: httplib
|
||||
import urllib.parse
|
||||
from datetime import datetime
|
||||
from urllib.parse import quote_from_bytes as quote
|
||||
|
||||
|
||||
DEBUG = False # ctrl-f this to configure logging
|
||||
|
||||
|
||||
WINDOWS = sys.platform == "win32"
|
||||
MACOS = platform.system() == "Darwin"
|
||||
|
||||
|
||||
try:
|
||||
from fuse import FUSE, FuseOSError, Operations
|
||||
except:
|
||||
if WINDOWS:
|
||||
libfuse = "install https://github.com/billziss-gh/winfsp/releases/latest"
|
||||
elif MACOS:
|
||||
libfuse = "install https://osxfuse.github.io/"
|
||||
else:
|
||||
libfuse = "apt install libfuse\n modprobe fuse"
|
||||
|
||||
print(
|
||||
"\n could not import fuse; these may help:"
|
||||
+ "\n python3 -m pip install --user fusepy\n "
|
||||
+ libfuse
|
||||
+ "\n"
|
||||
)
|
||||
raise
|
||||
|
||||
|
||||
def print(*args, **kwargs):
|
||||
try:
|
||||
builtins.print(*list(args), **kwargs)
|
||||
except:
|
||||
builtins.print(termsafe(" ".join(str(x) for x in args)), **kwargs)
|
||||
|
||||
|
||||
def termsafe(txt):
|
||||
try:
|
||||
return txt.encode(sys.stdout.encoding, "backslashreplace").decode(
|
||||
sys.stdout.encoding
|
||||
)
|
||||
except:
|
||||
return txt.encode(sys.stdout.encoding, "replace").decode(sys.stdout.encoding)
|
||||
|
||||
|
||||
def threadless_log(msg):
|
||||
print(msg + "\n", end="")
|
||||
|
||||
@@ -71,17 +104,89 @@ def null_log(msg):
|
||||
pass
|
||||
|
||||
|
||||
info = fancy_log
|
||||
log = fancy_log
|
||||
dbg = fancy_log
|
||||
log = null_log
|
||||
dbg = null_log
|
||||
class RecentLog(object):
|
||||
def __init__(self):
|
||||
self.mtx = threading.Lock()
|
||||
self.f = None # open("copyparty-fuse.log", "wb")
|
||||
self.q = []
|
||||
|
||||
thr = threading.Thread(target=self.printer)
|
||||
thr.daemon = True
|
||||
thr.start()
|
||||
|
||||
def put(self, msg):
|
||||
msg = "{} {}\n".format(rice_tid(), msg)
|
||||
if self.f:
|
||||
fmsg = " ".join([datetime.utcnow().strftime("%H%M%S.%f"), str(msg)])
|
||||
self.f.write(fmsg.encode("utf-8"))
|
||||
|
||||
with self.mtx:
|
||||
self.q.append(msg)
|
||||
if len(self.q) > 200:
|
||||
self.q = self.q[-50:]
|
||||
|
||||
def printer(self):
|
||||
while True:
|
||||
time.sleep(0.05)
|
||||
with self.mtx:
|
||||
q = self.q
|
||||
if not q:
|
||||
continue
|
||||
|
||||
self.q = []
|
||||
|
||||
print("".join(q), end="")
|
||||
|
||||
|
||||
if DEBUG:
|
||||
# debug=on,
|
||||
# windows terminals are slow (cmd.exe, mintty)
|
||||
# otoh fancy_log beats RecentLog on linux
|
||||
logger = RecentLog().put if WINDOWS else fancy_log
|
||||
|
||||
info = logger
|
||||
log = logger
|
||||
dbg = logger
|
||||
else:
|
||||
# debug=off, speed is dontcare
|
||||
info = fancy_log
|
||||
log = null_log
|
||||
dbg = null_log
|
||||
|
||||
|
||||
# [windows/cmd/cpy3] python dev\copyparty\bin\copyparty-fuse.py q: http://192.168.1.159:1234/
|
||||
# [windows/cmd/msys2] C:\msys64\mingw64\bin\python3 dev\copyparty\bin\copyparty-fuse.py q: http://192.168.1.159:1234/
|
||||
# [windows/mty/msys2] /mingw64/bin/python3 /c/Users/ed/dev/copyparty/bin/copyparty-fuse.py q: http://192.168.1.159:1234/
|
||||
#
|
||||
# [windows] find /q/music/albums/Phant*24bit -printf '%s %p\n' | sort -n | tail -n 8 | sed -r 's/^[0-9]+ //' | while IFS= read -r x; do dd if="$x" of=/dev/null bs=4k count=8192 & done
|
||||
# [alpine] ll t; for x in t/2020_0724_16{2,3}*; do dd if="$x" of=/dev/null bs=4k count=10240 & done
|
||||
#
|
||||
# 72.4983 windows mintty msys2 fancy_log
|
||||
# 219.5781 windows cmd msys2 fancy_log
|
||||
# nope.avi windows cmd cpy3 fancy_log
|
||||
# 9.8817 windows mintty msys2 RecentLog 200 50 0.1
|
||||
# 10.2241 windows cmd cpy3 RecentLog 200 50 0.1
|
||||
# 9.8494 windows cmd msys2 RecentLog 200 50 0.1
|
||||
# 7.8061 windows mintty msys2 fancy_log <info-only>
|
||||
# 7.9961 windows mintty msys2 RecentLog <info-only>
|
||||
# 4.2603 alpine xfce4 cpy3 RecentLog
|
||||
# 4.1538 alpine xfce4 cpy3 fancy_log
|
||||
# 3.1742 alpine urxvt cpy3 fancy_log
|
||||
|
||||
|
||||
def get_tid():
|
||||
return threading.current_thread().ident
|
||||
|
||||
|
||||
def html_dec(txt):
|
||||
return (
|
||||
txt.replace("<", "<")
|
||||
.replace(">", ">")
|
||||
.replace(""", '"')
|
||||
.replace("&", "&")
|
||||
)
|
||||
|
||||
|
||||
class CacheNode(object):
|
||||
def __init__(self, tag, data):
|
||||
self.tag = tag
|
||||
@@ -147,9 +252,8 @@ class Gateway(object):
|
||||
return c.getresponse()
|
||||
|
||||
def listdir(self, path):
|
||||
web_path = "/" + "/".join([self.web_root, path]) + "?dots"
|
||||
|
||||
r = self.sendreq("GET", self.quotep(web_path))
|
||||
web_path = self.quotep("/" + "/".join([self.web_root, path])) + "?dots"
|
||||
r = self.sendreq("GET", web_path)
|
||||
if r.status != 200:
|
||||
self.closeconn()
|
||||
raise Exception(
|
||||
@@ -158,14 +262,22 @@ class Gateway(object):
|
||||
)
|
||||
)
|
||||
|
||||
return self.parse_html(r)
|
||||
try:
|
||||
return self.parse_html(r)
|
||||
except:
|
||||
traceback.print_exc()
|
||||
raise
|
||||
|
||||
def download_file_range(self, path, ofs1, ofs2):
|
||||
web_path = "/" + "/".join([self.web_root, path]) + "?raw"
|
||||
web_path = self.quotep("/" + "/".join([self.web_root, path])) + "?raw"
|
||||
hdr_range = "bytes={}-{}".format(ofs1, ofs2 - 1)
|
||||
log("downloading {}".format(hdr_range))
|
||||
info(
|
||||
"DL {:4.0f}K\033[36m{:>9}-{:<9}\033[0m{}".format(
|
||||
(ofs2 - ofs1) / 1024.0, ofs1, ofs2 - 1, path
|
||||
)
|
||||
)
|
||||
|
||||
r = self.sendreq("GET", self.quotep(web_path), headers={"Range": hdr_range})
|
||||
r = self.sendreq("GET", web_path, headers={"Range": hdr_range})
|
||||
if r.status != http.client.PARTIAL_CONTENT:
|
||||
self.closeconn()
|
||||
raise Exception(
|
||||
@@ -203,8 +315,16 @@ class Gateway(object):
|
||||
continue
|
||||
|
||||
ftype, fname, fsize, fdate = m.groups()
|
||||
ts = datetime.strptime(fdate, "%Y-%m-%d %H:%M:%S").timestamp()
|
||||
sz = int(fsize)
|
||||
fname = html_dec(fname)
|
||||
sz = 1
|
||||
ts = 60 * 60 * 24 * 2
|
||||
try:
|
||||
sz = int(fsize)
|
||||
ts = datetime.strptime(fdate, "%Y-%m-%d %H:%M:%S").timestamp()
|
||||
except:
|
||||
info("bad HTML or OS [{}] [{}]".format(fdate, fsize))
|
||||
# python cannot strptime(1959-01-01) on windows
|
||||
|
||||
if ftype == "-":
|
||||
ret.append([fname, self.stat_file(ts, sz), 0])
|
||||
else:
|
||||
@@ -214,7 +334,7 @@ class Gateway(object):
|
||||
|
||||
def stat_dir(self, ts, sz=4096):
|
||||
return {
|
||||
"st_mode": 0o555 | stat.S_IFDIR,
|
||||
"st_mode": stat.S_IFDIR | 0o555,
|
||||
"st_uid": 1000,
|
||||
"st_gid": 1000,
|
||||
"st_size": sz,
|
||||
@@ -226,7 +346,7 @@ class Gateway(object):
|
||||
|
||||
def stat_file(self, ts, sz):
|
||||
return {
|
||||
"st_mode": 0o444 | stat.S_IFREG,
|
||||
"st_mode": stat.S_IFREG | 0o444,
|
||||
"st_uid": 1000,
|
||||
"st_gid": 1000,
|
||||
"st_size": sz,
|
||||
@@ -238,8 +358,11 @@ class Gateway(object):
|
||||
|
||||
|
||||
class CPPF(Operations):
|
||||
def __init__(self, base_url):
|
||||
def __init__(self, base_url, dircache, filecache):
|
||||
self.gw = Gateway(base_url)
|
||||
self.junk_fh_ctr = 3
|
||||
self.n_dircache = dircache
|
||||
self.n_filecache = filecache
|
||||
|
||||
self.dircache = []
|
||||
self.dircache_mtx = threading.Lock()
|
||||
@@ -249,12 +372,23 @@ class CPPF(Operations):
|
||||
|
||||
info("up")
|
||||
|
||||
def _describe(self):
|
||||
msg = ""
|
||||
with self.filecache_mtx:
|
||||
for n, cn in enumerate(self.filecache):
|
||||
cache_path, cache1 = cn.tag
|
||||
cache2 = cache1 + len(cn.data)
|
||||
msg += "\n{:<2} {:>7} {:>10}:{:<9} {}".format(
|
||||
n, len(cn.data), cache1, cache2, cache_path
|
||||
)
|
||||
return msg
|
||||
|
||||
def clean_dircache(self):
|
||||
"""not threadsafe"""
|
||||
now = time.time()
|
||||
cutoff = 0
|
||||
for cn in self.dircache:
|
||||
if now - cn.ts > 1:
|
||||
if now - cn.ts > self.n_dircache:
|
||||
cutoff += 1
|
||||
else:
|
||||
break
|
||||
@@ -263,8 +397,7 @@ class CPPF(Operations):
|
||||
self.dircache = self.dircache[cutoff:]
|
||||
|
||||
def get_cached_dir(self, dirpath):
|
||||
# with self.dircache_mtx:
|
||||
if True:
|
||||
with self.dircache_mtx:
|
||||
self.clean_dircache()
|
||||
for cn in self.dircache:
|
||||
if cn.tag == dirpath:
|
||||
@@ -301,9 +434,8 @@ class CPPF(Operations):
|
||||
car = None
|
||||
cdr = None
|
||||
ncn = -1
|
||||
# with self.filecache_mtx:
|
||||
if True:
|
||||
dbg("cache request from {} to {}, size {}".format(get1, get2, file_sz))
|
||||
dbg("cache request {}:{} |{}|".format(get1, get2, file_sz) + self._describe())
|
||||
with self.filecache_mtx:
|
||||
for cn in self.filecache:
|
||||
ncn += 1
|
||||
|
||||
@@ -313,6 +445,12 @@ class CPPF(Operations):
|
||||
|
||||
cache2 = cache1 + len(cn.data)
|
||||
if get2 <= cache1 or get1 >= cache2:
|
||||
# request does not overlap with cached area at all
|
||||
continue
|
||||
|
||||
if get1 < cache1 and get2 > cache2:
|
||||
# cached area does overlap, but must specifically contain
|
||||
# either the first or last byte in the requested range
|
||||
continue
|
||||
|
||||
if get1 >= cache1 and get2 <= cache2:
|
||||
@@ -323,7 +461,7 @@ class CPPF(Operations):
|
||||
buf_ofs = get1 - cache1
|
||||
buf_end = buf_ofs + (get2 - get1)
|
||||
dbg(
|
||||
"found all ({}, {} to {}, len {}) [{}:{}] = {}".format(
|
||||
"found all (#{} {}:{} |{}|) [{}:{}] = {}".format(
|
||||
ncn,
|
||||
cache1,
|
||||
cache2,
|
||||
@@ -335,11 +473,11 @@ class CPPF(Operations):
|
||||
)
|
||||
return cn.data[buf_ofs:buf_end]
|
||||
|
||||
if get2 < cache2:
|
||||
if get2 <= cache2:
|
||||
x = cn.data[: get2 - cache1]
|
||||
if not cdr or len(cdr) < len(x):
|
||||
dbg(
|
||||
"found car ({}, {} to {}, len {}) [:{}-{}] = [:{}] = {}".format(
|
||||
"found cdr (#{} {}:{} |{}|) [:{}-{}] = [:{}] = {}".format(
|
||||
ncn,
|
||||
cache1,
|
||||
cache2,
|
||||
@@ -354,11 +492,11 @@ class CPPF(Operations):
|
||||
|
||||
continue
|
||||
|
||||
if get1 > cache1:
|
||||
x = cn.data[-(cache2 - get1) :]
|
||||
if get1 >= cache1:
|
||||
x = cn.data[-(max(0, cache2 - get1)) :]
|
||||
if not car or len(car) < len(x):
|
||||
dbg(
|
||||
"found cdr ({}, {} to {}, len {}) [-({}-{}):] = [-{}:] = {}".format(
|
||||
"found car (#{} {}:{} |{}|) [-({}-{}):] = [-{}:] = {}".format(
|
||||
ncn,
|
||||
cache1,
|
||||
cache2,
|
||||
@@ -373,38 +511,52 @@ class CPPF(Operations):
|
||||
|
||||
continue
|
||||
|
||||
raise Exception("what")
|
||||
msg = "cache fallthrough\n{} {} {}\n{} {} {}\n{} {} --\n".format(
|
||||
get1,
|
||||
get2,
|
||||
get2 - get1,
|
||||
cache1,
|
||||
cache2,
|
||||
cache2 - cache1,
|
||||
get1 - cache1,
|
||||
get2 - cache2,
|
||||
)
|
||||
msg += self._describe()
|
||||
raise Exception(msg)
|
||||
|
||||
if car and cdr:
|
||||
if car and cdr and len(car) + len(cdr) == get2 - get1:
|
||||
dbg("<cache> have both")
|
||||
return car + cdr
|
||||
|
||||
ret = car + cdr
|
||||
if len(ret) == get2 - get1:
|
||||
return ret
|
||||
|
||||
raise Exception("{} + {} != {} - {}".format(len(car), len(cdr), get2, get1))
|
||||
|
||||
elif cdr:
|
||||
elif cdr and (not car or len(car) < len(cdr)):
|
||||
h_end = get1 + (get2 - get1) - len(cdr)
|
||||
h_ofs = h_end - 512 * 1024
|
||||
h_ofs = min(get1, h_end - 512 * 1024)
|
||||
|
||||
if h_ofs < 0:
|
||||
h_ofs = 0
|
||||
|
||||
buf_ofs = (get2 - get1) - len(cdr)
|
||||
buf_ofs = get1 - h_ofs
|
||||
|
||||
dbg(
|
||||
"<cache> cdr {}, car {}-{}={} [-{}:]".format(
|
||||
"<cache> cdr {}, car {}:{} |{}| [{}:]".format(
|
||||
len(cdr), h_ofs, h_end, h_end - h_ofs, buf_ofs
|
||||
)
|
||||
)
|
||||
|
||||
buf = self.gw.download_file_range(path, h_ofs, h_end)
|
||||
ret = buf[-buf_ofs:] + cdr
|
||||
if len(buf) == h_end - h_ofs:
|
||||
ret = buf[buf_ofs:] + cdr
|
||||
else:
|
||||
ret = buf[get1 - h_ofs :]
|
||||
info(
|
||||
"remote truncated {}:{} to |{}|, will return |{}|".format(
|
||||
h_ofs, h_end, len(buf), len(ret)
|
||||
)
|
||||
)
|
||||
|
||||
elif car:
|
||||
h_ofs = get1 + len(car)
|
||||
h_end = h_ofs + 1024 * 1024
|
||||
h_end = max(get2, h_ofs + 1024 * 1024)
|
||||
|
||||
if h_end > file_sz:
|
||||
h_end = file_sz
|
||||
@@ -412,7 +564,7 @@ class CPPF(Operations):
|
||||
buf_ofs = (get2 - get1) - len(car)
|
||||
|
||||
dbg(
|
||||
"<cache> car {}, cdr {}-{}={} [:{}]".format(
|
||||
"<cache> car {}, cdr {}:{} |{}| [:{}]".format(
|
||||
len(car), h_ofs, h_end, h_end - h_ofs, buf_ofs
|
||||
)
|
||||
)
|
||||
@@ -421,8 +573,13 @@ class CPPF(Operations):
|
||||
ret = car + buf[:buf_ofs]
|
||||
|
||||
else:
|
||||
h_ofs = get1 - 256 * 1024
|
||||
h_end = get2 + 1024 * 1024
|
||||
if get2 - get1 <= 1024 * 1024:
|
||||
h_ofs = get1 - 256 * 1024
|
||||
h_end = get2 + 1024 * 1024
|
||||
else:
|
||||
# big enough, doesn't need pads
|
||||
h_ofs = get1
|
||||
h_end = get2
|
||||
|
||||
if h_ofs < 0:
|
||||
h_ofs = 0
|
||||
@@ -434,7 +591,7 @@ class CPPF(Operations):
|
||||
buf_end = buf_ofs + get2 - get1
|
||||
|
||||
dbg(
|
||||
"<cache> {}-{}={} [{}:{}]".format(
|
||||
"<cache> {}:{} |{}| [{}:{}]".format(
|
||||
h_ofs, h_end, h_end - h_ofs, buf_ofs, buf_end
|
||||
)
|
||||
)
|
||||
@@ -443,48 +600,84 @@ class CPPF(Operations):
|
||||
ret = buf[buf_ofs:buf_end]
|
||||
|
||||
cn = CacheNode([path, h_ofs], buf)
|
||||
# with self.filecache_mtx:
|
||||
if True:
|
||||
if len(self.filecache) > 6:
|
||||
with self.filecache_mtx:
|
||||
if len(self.filecache) >= self.n_filecache:
|
||||
self.filecache = self.filecache[1:] + [cn]
|
||||
else:
|
||||
self.filecache.append(cn)
|
||||
|
||||
return ret
|
||||
|
||||
def readdir(self, path, fh=None):
|
||||
def _readdir(self, path, fh=None):
|
||||
path = path.strip("/")
|
||||
log("readdir {}".format(path))
|
||||
log("readdir [{}] [{}]".format(path, fh))
|
||||
|
||||
ret = self.gw.listdir(path)
|
||||
if not self.n_dircache:
|
||||
return ret
|
||||
|
||||
# with self.dircache_mtx:
|
||||
if True:
|
||||
with self.dircache_mtx:
|
||||
cn = CacheNode(path, ret)
|
||||
self.dircache.append(cn)
|
||||
self.clean_dircache()
|
||||
|
||||
return ret
|
||||
|
||||
def readdir(self, path, fh=None):
|
||||
return [".", ".."] + self._readdir(path, fh)
|
||||
|
||||
def read(self, path, length, offset, fh=None):
|
||||
req_max = 1024 * 1024 * 8
|
||||
cache_max = 1024 * 1024 * 2
|
||||
if length > req_max:
|
||||
# windows actually doing 240 MiB read calls, sausage
|
||||
info("truncate |{}| to {}MiB".format(length, req_max >> 20))
|
||||
length = req_max
|
||||
|
||||
path = path.strip("/")
|
||||
|
||||
ofs2 = offset + length
|
||||
log("read {} @ {} len {} end {}".format(path, offset, length, ofs2))
|
||||
|
||||
file_sz = self.getattr(path)["st_size"]
|
||||
log("read {} |{}| {}:{} max {}".format(path, length, offset, ofs2, file_sz))
|
||||
if ofs2 > file_sz:
|
||||
ofs2 = file_sz
|
||||
log("truncate to len {} end {}".format(ofs2 - offset, ofs2))
|
||||
log("truncate to |{}| :{}".format(ofs2 - offset, ofs2))
|
||||
|
||||
if file_sz == 0 or offset >= ofs2:
|
||||
return b""
|
||||
|
||||
# toggle cache here i suppose
|
||||
# return self.get_cached_file(path, offset, ofs2, file_sz)
|
||||
return self.gw.download_file_range(path, offset, ofs2)
|
||||
if self.n_filecache and length <= cache_max:
|
||||
ret = self.get_cached_file(path, offset, ofs2, file_sz)
|
||||
else:
|
||||
ret = self.gw.download_file_range(path, offset, ofs2)
|
||||
|
||||
return ret
|
||||
|
||||
fn = "cppf-{}-{}-{}".format(time.time(), offset, length)
|
||||
if False:
|
||||
with open(fn, "wb", len(ret)) as f:
|
||||
f.write(ret)
|
||||
elif self.n_filecache:
|
||||
ret2 = self.gw.download_file_range(path, offset, ofs2)
|
||||
if ret != ret2:
|
||||
info(fn)
|
||||
for v in [ret, ret2]:
|
||||
try:
|
||||
info(len(v))
|
||||
except:
|
||||
info("uhh " + repr(v))
|
||||
|
||||
with open(fn + ".bad", "wb") as f:
|
||||
f.write(ret)
|
||||
with open(fn + ".good", "wb") as f:
|
||||
f.write(ret2)
|
||||
|
||||
raise Exception("cache bork")
|
||||
|
||||
return ret
|
||||
|
||||
def getattr(self, path, fh=None):
|
||||
log("getattr [{}]".format(path))
|
||||
|
||||
path = path.strip("/")
|
||||
try:
|
||||
dirpath, fname = path.rsplit("/", 1)
|
||||
@@ -492,23 +685,25 @@ class CPPF(Operations):
|
||||
dirpath = ""
|
||||
fname = path
|
||||
|
||||
log("getattr {}".format(path))
|
||||
|
||||
if not path:
|
||||
return self.gw.stat_dir(time.time())
|
||||
ret = self.gw.stat_dir(time.time())
|
||||
# dbg("=" + repr(ret))
|
||||
return ret
|
||||
|
||||
cn = self.get_cached_dir(dirpath)
|
||||
if cn:
|
||||
log("cache ok")
|
||||
dents = cn.data
|
||||
else:
|
||||
log("cache miss")
|
||||
dents = self.readdir(dirpath)
|
||||
dbg("cache miss")
|
||||
dents = self._readdir(dirpath)
|
||||
|
||||
for cache_name, cache_stat, _ in dents:
|
||||
if cache_name == fname:
|
||||
# dbg("=" + repr(cache_stat))
|
||||
return cache_stat
|
||||
|
||||
info("=ENOENT ({})".format(path))
|
||||
raise FuseOSError(errno.ENOENT)
|
||||
|
||||
access = None
|
||||
@@ -521,17 +716,136 @@ class CPPF(Operations):
|
||||
releasedir = None
|
||||
statfs = None
|
||||
|
||||
if False:
|
||||
# incorrect semantics but good for debugging stuff like samba and msys2
|
||||
def access(self, path, mode):
|
||||
log("@@ access [{}] [{}]".format(path, mode))
|
||||
return 1 if self.getattr(path) else 0
|
||||
|
||||
def flush(self, path, fh):
|
||||
log("@@ flush [{}] [{}]".format(path, fh))
|
||||
return True
|
||||
|
||||
def getxattr(self, *args):
|
||||
log("@@ getxattr [{}]".format("] [".join(str(x) for x in args)))
|
||||
return False
|
||||
|
||||
def listxattr(self, *args):
|
||||
log("@@ listxattr [{}]".format("] [".join(str(x) for x in args)))
|
||||
return False
|
||||
|
||||
def open(self, path, flags):
|
||||
log("@@ open [{}] [{}]".format(path, flags))
|
||||
return 42
|
||||
|
||||
def opendir(self, fh):
|
||||
log("@@ opendir [{}]".format(fh))
|
||||
return 69
|
||||
|
||||
def release(self, ino, fi):
|
||||
log("@@ release [{}] [{}]".format(ino, fi))
|
||||
return True
|
||||
|
||||
def releasedir(self, ino, fi):
|
||||
log("@@ releasedir [{}] [{}]".format(ino, fi))
|
||||
return True
|
||||
|
||||
def statfs(self, path):
|
||||
log("@@ statfs [{}]".format(path))
|
||||
return {}
|
||||
|
||||
if sys.platform == "win32":
|
||||
# quick compat for /mingw64/bin/python3 (msys2)
|
||||
def _open(self, path):
|
||||
try:
|
||||
x = self.getattr(path)
|
||||
if x["st_mode"] <= 0:
|
||||
raise Exception()
|
||||
|
||||
self.junk_fh_ctr += 1
|
||||
if self.junk_fh_ctr > 32000: # TODO untested
|
||||
self.junk_fh_ctr = 4
|
||||
|
||||
return self.junk_fh_ctr
|
||||
|
||||
except Exception as ex:
|
||||
log("open ERR {}".format(repr(ex)))
|
||||
raise FuseOSError(errno.ENOENT)
|
||||
|
||||
def open(self, path, flags):
|
||||
dbg("open [{}] [{}]".format(path, flags))
|
||||
return self._open(path)
|
||||
|
||||
def opendir(self, path):
|
||||
dbg("opendir [{}]".format(path))
|
||||
return self._open(path)
|
||||
|
||||
def flush(self, path, fh):
|
||||
dbg("flush [{}] [{}]".format(path, fh))
|
||||
|
||||
def release(self, ino, fi):
|
||||
dbg("release [{}] [{}]".format(ino, fi))
|
||||
|
||||
def releasedir(self, ino, fi):
|
||||
dbg("releasedir [{}] [{}]".format(ino, fi))
|
||||
|
||||
def access(self, path, mode):
|
||||
dbg("access [{}] [{}]".format(path, mode))
|
||||
try:
|
||||
x = self.getattr(path)
|
||||
if x["st_mode"] <= 0:
|
||||
raise Exception()
|
||||
except:
|
||||
raise FuseOSError(errno.ENOENT)
|
||||
|
||||
|
||||
def main():
|
||||
# filecache helps for reads that are ~64k or smaller;
|
||||
# linux generally does 128k so the cache is a slowdown,
|
||||
# windows likes to use 4k and 64k so cache is required,
|
||||
# value is numChunks (1~3M each) to keep in the cache
|
||||
nf = 24 if WINDOWS or MACOS else 0
|
||||
|
||||
# dircache is always a boost,
|
||||
# only want to disable it for tests etc,
|
||||
# value is numSec until an entry goes stale
|
||||
nd = 1
|
||||
|
||||
try:
|
||||
local, remote = sys.argv[1:]
|
||||
local, remote = sys.argv[1:3]
|
||||
filecache = nf if len(sys.argv) <= 3 else int(sys.argv[3])
|
||||
dircache = nd if len(sys.argv) <= 4 else float(sys.argv[4])
|
||||
except:
|
||||
print("need arg 1: local directory")
|
||||
where = "local directory"
|
||||
if WINDOWS:
|
||||
where += " or DRIVE:"
|
||||
|
||||
print("need arg 1: " + where)
|
||||
print("need arg 2: root url")
|
||||
print("optional 3: num files in filecache ({})".format(nf))
|
||||
print("optional 4: num seconds / dircache ({})".format(nd))
|
||||
print()
|
||||
print("example:")
|
||||
print(" copyparty-fuse.py ./music http://192.168.1.69:3923/music/")
|
||||
if WINDOWS:
|
||||
print(" copyparty-fuse.py M: http://192.168.1.69:3923/music/")
|
||||
|
||||
return
|
||||
|
||||
FUSE(CPPF(remote), local, foreground=True, nothreads=True)
|
||||
# if nothreads=False also uncomment the `with *_mtx` things
|
||||
if WINDOWS:
|
||||
os.system("")
|
||||
|
||||
try:
|
||||
with open("/etc/fuse.conf", "rb") as f:
|
||||
allow_other = b"\nuser_allow_other" in f.read()
|
||||
except:
|
||||
allow_other = WINDOWS or MACOS
|
||||
|
||||
args = {"foreground": True, "nothreads": True, "allow_other": allow_other}
|
||||
if not MACOS:
|
||||
args["nonempty"] = True
|
||||
|
||||
FUSE(CPPF(remote, dircache, filecache), local, **args)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
590
bin/copyparty-fuseb.py
Executable file
590
bin/copyparty-fuseb.py
Executable file
@@ -0,0 +1,590 @@
|
||||
#!/usr/bin/env python3
|
||||
from __future__ import print_function, unicode_literals
|
||||
|
||||
"""copyparty-fuseb: remote copyparty as a local filesystem"""
|
||||
__author__ = "ed <copyparty@ocv.me>"
|
||||
__copyright__ = 2020
|
||||
__license__ = "MIT"
|
||||
__url__ = "https://github.com/9001/copyparty/"
|
||||
|
||||
import re
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import stat
|
||||
import errno
|
||||
import struct
|
||||
import threading
|
||||
import http.client # py2: httplib
|
||||
import urllib.parse
|
||||
from datetime import datetime
|
||||
from urllib.parse import quote_from_bytes as quote
|
||||
|
||||
try:
|
||||
import fuse
|
||||
from fuse import Fuse
|
||||
|
||||
fuse.fuse_python_api = (0, 2)
|
||||
if not hasattr(fuse, "__version__"):
|
||||
raise Exception("your fuse-python is way old")
|
||||
except:
|
||||
print(
|
||||
"\n could not import fuse; these may help:\n python3 -m pip install --user fuse-python\n apt install libfuse\n modprobe fuse\n"
|
||||
)
|
||||
raise
|
||||
|
||||
|
||||
"""
|
||||
mount a copyparty server (local or remote) as a filesystem
|
||||
|
||||
usage:
|
||||
python ./copyparty-fuseb.py -f -o allow_other,auto_unmount,nonempty,url=http://192.168.1.69:3923 /mnt/nas
|
||||
|
||||
dependencies:
|
||||
sudo apk add fuse-dev python3-dev
|
||||
python3 -m pip install --user fuse-python
|
||||
|
||||
fork of copyparty-fuse.py based on fuse-python which
|
||||
appears to be more compliant than fusepy? since this works with samba
|
||||
(probably just my garbage code tbh)
|
||||
"""
|
||||
|
||||
|
||||
def threadless_log(msg):
|
||||
print(msg + "\n", end="")
|
||||
|
||||
|
||||
def boring_log(msg):
|
||||
msg = "\033[36m{:012x}\033[0m {}\n".format(threading.current_thread().ident, msg)
|
||||
print(msg[4:], end="")
|
||||
|
||||
|
||||
def rice_tid():
|
||||
tid = threading.current_thread().ident
|
||||
c = struct.unpack(b"B" * 5, struct.pack(b">Q", tid)[-5:])
|
||||
return "".join("\033[1;37;48;5;{}m{:02x}".format(x, x) for x in c) + "\033[0m"
|
||||
|
||||
|
||||
def fancy_log(msg):
|
||||
print("{} {}\n".format(rice_tid(), msg), end="")
|
||||
|
||||
|
||||
def null_log(msg):
|
||||
pass
|
||||
|
||||
|
||||
info = fancy_log
|
||||
log = fancy_log
|
||||
dbg = fancy_log
|
||||
log = null_log
|
||||
dbg = null_log
|
||||
|
||||
|
||||
def get_tid():
|
||||
return threading.current_thread().ident
|
||||
|
||||
|
||||
def html_dec(txt):
|
||||
return (
|
||||
txt.replace("<", "<")
|
||||
.replace(">", ">")
|
||||
.replace(""", '"')
|
||||
.replace("&", "&")
|
||||
)
|
||||
|
||||
|
||||
class CacheNode(object):
|
||||
def __init__(self, tag, data):
|
||||
self.tag = tag
|
||||
self.data = data
|
||||
self.ts = time.time()
|
||||
|
||||
|
||||
class Stat(fuse.Stat):
|
||||
def __init__(self):
|
||||
self.st_mode = 0
|
||||
self.st_ino = 0
|
||||
self.st_dev = 0
|
||||
self.st_nlink = 1
|
||||
self.st_uid = 1000
|
||||
self.st_gid = 1000
|
||||
self.st_size = 0
|
||||
self.st_atime = 0
|
||||
self.st_mtime = 0
|
||||
self.st_ctime = 0
|
||||
|
||||
|
||||
class Gateway(object):
    """HTTP client for a copyparty server, one HTTPConnection per thread.

    Scrapes the server's HTML directory listings and fetches file
    contents with ranged GET requests.
    """

    def __init__(self, base_url):
        # base_url: e.g. "http://host:port/some/root"
        self.base_url = base_url

        ui = urllib.parse.urlparse(base_url)
        self.web_root = ui.path.strip("/")
        try:
            # netloc carries an explicit port
            self.web_host, self.web_port = ui.netloc.split(":")
            self.web_port = int(self.web_port)
        except:
            # no explicit port; derive it from the scheme
            self.web_host = ui.netloc
            if ui.scheme == "http":
                self.web_port = 80
            elif ui.scheme == "https":
                raise Exception("todo")
            else:
                raise Exception("bad url?")

        # thread-ident -> persistent HTTPConnection
        self.conns = {}

    def quotep(self, path):
        """Percent-encode a filesystem path for use in a request URL."""
        # TODO: mojibake support
        path = path.encode("utf-8", "ignore")
        return quote(path, safe="/")

    def getconn(self, tid=None):
        """Return the calling thread's connection, creating it on first use."""
        tid = tid or get_tid()
        try:
            return self.conns[tid]
        except:
            info("new conn [{}] [{}]".format(self.web_host, self.web_port))

            conn = http.client.HTTPConnection(self.web_host, self.web_port, timeout=260)

            self.conns[tid] = conn
            return conn

    def closeconn(self, tid=None):
        """Close and forget the calling thread's connection, ignoring errors."""
        tid = tid or get_tid()
        try:
            self.conns[tid].close()
            del self.conns[tid]
        except:
            pass

    def sendreq(self, *args, **kwargs):
        """Send a request and return the response.

        If the first attempt fails (typically a stale keepalive
        connection), reconnect and retry exactly once.
        """
        tid = get_tid()
        try:
            c = self.getconn(tid)
            c.request(*list(args), **kwargs)
            return c.getresponse()
        except:
            self.closeconn(tid)
            c = self.getconn(tid)
            c.request(*list(args), **kwargs)
            return c.getresponse()

    def listdir(self, path):
        """List a server directory; returns [[name, Stat, 0], ...].

        Raises on any non-200 response.  "?dots" asks the server to
        include dotfiles in the listing.
        """
        web_path = self.quotep("/" + "/".join([self.web_root, path])) + "?dots"
        r = self.sendreq("GET", web_path)
        if r.status != 200:
            self.closeconn()
            raise Exception(
                "http error {} reading dir {} in {}".format(
                    r.status, web_path, rice_tid()
                )
            )

        return self.parse_html(r)

    def download_file_range(self, path, ofs1, ofs2):
        """Download bytes [ofs1, ofs2) of a file via an HTTP Range request.

        Note the Range header's end offset is inclusive, hence ofs2 - 1.
        Raises unless the server answers 206 Partial Content.
        """
        web_path = self.quotep("/" + "/".join([self.web_root, path])) + "?raw"
        hdr_range = "bytes={}-{}".format(ofs1, ofs2 - 1)
        log("downloading {}".format(hdr_range))

        r = self.sendreq("GET", web_path, headers={"Range": hdr_range})
        if r.status != http.client.PARTIAL_CONTENT:
            self.closeconn()
            raise Exception(
                "http error {} reading file {} range {} in {}".format(
                    r.status, web_path, hdr_range, rice_tid()
                )
            )

        return r.read()

    def parse_html(self, datasrc):
        """Scrape a copyparty HTML listing into [[name, Stat, 0], ...].

        Reads the response in 4 KiB chunks, only processing whole lines
        and carrying any partial trailing line over to the next chunk.
        Rows matching the listing-table regex become file ("-") or
        directory ("DIR") entries.
        """
        ret = []
        remainder = b""
        ptn = re.compile(
            r"^<tr><td>(-|DIR)</td><td><a [^>]+>([^<]+)</a></td><td>([^<]+)</td><td>([^<]+)</td></tr>$"
        )

        while True:
            buf = remainder + datasrc.read(4096)
            # print('[{}]'.format(buf.decode('utf-8')))
            if not buf:
                break

            # split off the incomplete final line, if any
            remainder = b""
            endpos = buf.rfind(b"\n")
            if endpos >= 0:
                remainder = buf[endpos + 1 :]
                buf = buf[:endpos]

            lines = buf.decode("utf-8").split("\n")
            for line in lines:
                m = ptn.match(line)
                if not m:
                    # print(line)
                    continue

                ftype, fname, fsize, fdate = m.groups()
                fname = html_dec(fname)
                ts = datetime.strptime(fdate, "%Y-%m-%d %H:%M:%S").timestamp()
                sz = int(fsize)
                if ftype == "-":
                    ret.append([fname, self.stat_file(ts, sz), 0])
                else:
                    ret.append([fname, self.stat_dir(ts, sz), 0])

        return ret

    def stat_dir(self, ts, sz=4096):
        """Build a read-only (r-x) directory Stat with the given time/size."""
        ret = Stat()
        ret.st_mode = stat.S_IFDIR | 0o555
        ret.st_nlink = 2
        ret.st_size = sz
        ret.st_atime = ts
        ret.st_mtime = ts
        ret.st_ctime = ts
        return ret

    def stat_file(self, ts, sz):
        """Build a read-only (r--) regular-file Stat with the given time/size."""
        ret = Stat()
        ret.st_mode = stat.S_IFREG | 0o444
        ret.st_size = sz
        ret.st_atime = ts
        ret.st_mtime = ts
        ret.st_ctime = ts
        return ret
|
||||
|
||||
|
||||
class CPPF(Fuse):
    """Read-only FUSE filesystem backed by a remote copyparty server."""

    def __init__(self, *args, **kwargs):
        Fuse.__init__(self, *args, **kwargs)

        # populated from the "-o url=..." mount option by the fuse parser
        self.url = None

        # dircache: CacheNode(dirpath, listing) entries, ~1 second TTL
        self.dircache = []
        self.dircache_mtx = threading.Lock()

        # filecache: CacheNode([path, offset], bytes) entries, LRU-ish, max 7
        self.filecache = []
        self.filecache_mtx = threading.Lock()

    def init2(self):
        """Deferred setup once self.url is known (after option parsing)."""
        # TODO figure out how python-fuse wanted this to go
        self.gw = Gateway(self.url)  # .decode('utf-8'))
        info("up")

    def clean_dircache(self):
        """not threadsafe"""
        # entries are appended chronologically, so the expired ones
        # (older than 1s) form a prefix of the list
        now = time.time()
        cutoff = 0
        for cn in self.dircache:
            if now - cn.ts > 1:
                cutoff += 1
            else:
                break

        if cutoff > 0:
            self.dircache = self.dircache[cutoff:]

    def get_cached_dir(self, dirpath):
        """Return the cached listing node for dirpath, or None on miss/expiry."""
        # with self.dircache_mtx:
        if True:
            self.clean_dircache()
            for cn in self.dircache:
                if cn.tag == dirpath:
                    return cn

        return None

    """
    ,-------------------------------,  g1>=c1, g2<=c2
    |cache1                   cache2|  buf[g1-c1:(g1-c1)+(g2-g1)]
    `-------------------------------'
              ,---------------,
              |get1       get2|
              `---------------'
    __________________________________________________________________________

    ,-------------------------------,  g2<=c2, (g2>=c1)
    |cache1                   cache2|  cdr=buf[:g2-c1]
    `-------------------------------'  dl car; g1-512K:c1
    ,---------------,
    |get1       get2|
    `---------------'
    __________________________________________________________________________

    ,-------------------------------,  g1>=c1, (g1<=c2)
    |cache1                   cache2|  car=buf[c2-g1:]
    `-------------------------------'  dl cdr; c2:c2+1M
                      ,---------------,
                      |get1       get2|
                      `---------------'
    """

    def get_cached_file(self, path, get1, get2, file_sz):
        """Return bytes [get1, get2) of path, reading through the block cache.

        A full cache hit is served directly; a partial hit keeps the
        cached half ("car" = head of the request, "cdr" = tail) and
        downloads only the missing half with some readahead padding;
        a full miss downloads the padded range.  See the diagrams above.
        """
        car = None  # cached bytes covering the start of the request
        cdr = None  # cached bytes covering the end of the request
        ncn = -1
        # with self.filecache_mtx:
        if True:
            dbg("cache request from {} to {}, size {}".format(get1, get2, file_sz))
            for cn in self.filecache:
                ncn += 1

                cache_path, cache1 = cn.tag
                if cache_path != path:
                    continue

                cache2 = cache1 + len(cn.data)
                if get2 <= cache1 or get1 >= cache2:
                    # no overlap with this cache entry
                    continue

                if get1 >= cache1 and get2 <= cache2:
                    # full hit; keep cache entry alive by moving it to the end
                    self.filecache = (
                        self.filecache[:ncn] + self.filecache[ncn + 1 :] + [cn]
                    )
                    buf_ofs = get1 - cache1
                    buf_end = buf_ofs + (get2 - get1)
                    dbg(
                        "found all ({}, {} to {}, len {}) [{}:{}] = {}".format(
                            ncn,
                            cache1,
                            cache2,
                            len(cn.data),
                            buf_ofs,
                            buf_end,
                            buf_end - buf_ofs,
                        )
                    )
                    return cn.data[buf_ofs:buf_end]

                if get2 < cache2:
                    # entry covers the tail of the request; keep the largest such piece
                    # NOTE(review): the log label says "car" but this piece is
                    # assigned to cdr (the tail) -- labels appear swapped; confirm
                    x = cn.data[: get2 - cache1]
                    if not cdr or len(cdr) < len(x):
                        dbg(
                            "found car ({}, {} to {}, len {}) [:{}-{}] = [:{}] = {}".format(
                                ncn,
                                cache1,
                                cache2,
                                len(cn.data),
                                get2,
                                cache1,
                                get2 - cache1,
                                len(x),
                            )
                        )
                        cdr = x

                    continue

                if get1 > cache1:
                    # entry covers the head of the request; keep the largest such piece
                    x = cn.data[-(cache2 - get1) :]
                    if not car or len(car) < len(x):
                        dbg(
                            "found cdr ({}, {} to {}, len {}) [-({}-{}):] = [-{}:] = {}".format(
                                ncn,
                                cache1,
                                cache2,
                                len(cn.data),
                                cache2,
                                get1,
                                cache2 - get1,
                                len(x),
                            )
                        )
                        car = x

                    continue

                # overlap cases above are exhaustive; anything else is a logic error
                raise Exception("what")

        if car and cdr:
            # both halves cached; they must join into exactly the request
            dbg("<cache> have both")

            ret = car + cdr
            if len(ret) == get2 - get1:
                return ret

            raise Exception("{} + {} != {} - {}".format(len(car), len(cdr), get2, get1))

        elif cdr:
            # tail cached; download the head, padded backwards by up to 512 KiB
            h_end = get1 + (get2 - get1) - len(cdr)
            h_ofs = h_end - 512 * 1024

            if h_ofs < 0:
                h_ofs = 0

            buf_ofs = (get2 - get1) - len(cdr)

            dbg(
                "<cache> cdr {}, car {}-{}={} [-{}:]".format(
                    len(cdr), h_ofs, h_end, h_end - h_ofs, buf_ofs
                )
            )

            buf = self.gw.download_file_range(path, h_ofs, h_end)
            ret = buf[-buf_ofs:] + cdr

        elif car:
            # head cached; download the tail, padded forwards by up to 1 MiB
            h_ofs = get1 + len(car)
            h_end = h_ofs + 1024 * 1024

            if h_end > file_sz:
                h_end = file_sz

            buf_ofs = (get2 - get1) - len(car)

            dbg(
                "<cache> car {}, cdr {}-{}={} [:{}]".format(
                    len(car), h_ofs, h_end, h_end - h_ofs, buf_ofs
                )
            )

            buf = self.gw.download_file_range(path, h_ofs, h_end)
            ret = car + buf[:buf_ofs]

        else:
            # full miss; fetch the range padded 256 KiB before and 1 MiB after
            h_ofs = get1 - 256 * 1024
            h_end = get2 + 1024 * 1024

            if h_ofs < 0:
                h_ofs = 0

            if h_end > file_sz:
                h_end = file_sz

            buf_ofs = get1 - h_ofs
            buf_end = buf_ofs + get2 - get1

            dbg(
                "<cache> {}-{}={} [{}:{}]".format(
                    h_ofs, h_end, h_end - h_ofs, buf_ofs, buf_end
                )
            )

            buf = self.gw.download_file_range(path, h_ofs, h_end)
            ret = buf[buf_ofs:buf_end]

        # remember the freshly downloaded block, evicting the oldest past 7 entries
        cn = CacheNode([path, h_ofs], buf)
        # with self.filecache_mtx:
        if True:
            if len(self.filecache) > 6:
                self.filecache = self.filecache[1:] + [cn]
            else:
                self.filecache.append(cn)

        return ret

    def _readdir(self, path):
        """Fetch a directory listing from the server and add it to the dircache."""
        path = path.strip("/")
        log("readdir {}".format(path))

        ret = self.gw.listdir(path)

        # with self.dircache_mtx:
        if True:
            cn = CacheNode(path, ret)
            self.dircache.append(cn)
            self.clean_dircache()

        return ret

    def readdir(self, path, offset):
        """FUSE readdir: yield one Direntry per listing row, from offset on."""
        for e in self._readdir(path)[offset:]:
            # log("yield [{}]".format(e[0]))
            yield fuse.Direntry(e[0])

    def open(self, path, flags):
        """FUSE open: read-only filesystem, so any write access is rejected."""
        if (flags & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR)) != os.O_RDONLY:
            return -errno.EACCES

        # getattr returns a Stat on success or a negative errno int on failure;
        # either way it is passed through to the caller
        st = self.getattr(path)
        try:
            if st.st_nlink > 0:
                return st
        except:
            return st  # -int(os.errcode)

    def read(self, path, length, offset, fh=None, *args):
        """FUSE read: return bytes [offset, offset+length) of path."""
        if args:
            log("unexpected args [" + "] [".join(repr(x) for x in args) + "]")
            raise Exception()

        path = path.strip("/")

        ofs2 = offset + length
        log("read {} @ {} len {} end {}".format(path, offset, length, ofs2))

        # getattr yields a Stat on success or a negative errno int on failure
        st = self.getattr(path)
        try:
            file_sz = st.st_size
        except:
            return st  # -int(os.errcode)

        # clamp reads extending past end of file
        if ofs2 > file_sz:
            ofs2 = file_sz
            log("truncate to len {} end {}".format(ofs2 - offset, ofs2))

        if file_sz == 0 or offset >= ofs2:
            return b""

        # toggle cache here i suppose
        # return self.get_cached_file(path, offset, ofs2, file_sz)
        return self.gw.download_file_range(path, offset, ofs2)

    def getattr(self, path):
        """FUSE getattr: resolve path through its parent's (cached) listing.

        Returns a Stat on success, -errno.ENOENT if the entry is missing.
        """
        log("getattr [{}]".format(path))

        path = path.strip("/")
        try:
            dirpath, fname = path.rsplit("/", 1)
        except:
            # no slash; the entry lives directly in the mount root
            dirpath = ""
            fname = path

        if not path:
            # the mount root itself
            ret = self.gw.stat_dir(time.time())
            dbg("=root")
            return ret

        cn = self.get_cached_dir(dirpath)
        if cn:
            log("cache ok")
            dents = cn.data
        else:
            log("cache miss")
            dents = self._readdir(dirpath)

        for cache_name, cache_stat, _ in dents:
            if cache_name == fname:
                dbg("=file")
                return cache_stat

        log("=404")
        return -errno.ENOENT
|
||||
|
||||
|
||||
def main():
    """Parse fuse-style options, validate the url, mount, and park forever."""
    server = CPPF()
    # register "-o url=..." as a mount option; parse() stores it on server.url
    server.parser.add_option(mountopt="url", metavar="BASE_URL", default=None)
    server.parse(values=server, errex=1)
    if not server.url or not str(server.url).startswith("http"):
        print("\nerror:")
        print(" need argument: -o url=<...>")
        print(" need argument: mount-path")
        print("example:")
        print(
            " ./copyparty-fuseb.py -f -o allow_other,auto_unmount,nonempty,url=http://192.168.1.69:3923 /mnt/nas"
        )
        sys.exit(1)

    server.init2()
    # run the fuse event loop in a daemon thread and keep the main thread
    # sleeping, so the process stays interruptible with ^C
    threading.Thread(target=server.main, daemon=True).start()
    while True:
        time.sleep(9001)


if __name__ == "__main__":
    main()
|
||||
@@ -118,7 +118,7 @@ printf ']}' >> /dev/shm/$salt.hs
|
||||
|
||||
printf '\033[36m'
|
||||
|
||||
#curl "http://$target:1234$posturl/handshake.php" -H "Content-Type: text/plain;charset=UTF-8" -H "Cookie: cppwd=$passwd" --data "$(cat "/dev/shm/$salt.hs")" | tee /dev/shm/$salt.res
|
||||
#curl "http://$target:3923$posturl/handshake.php" -H "Content-Type: text/plain;charset=UTF-8" -H "Cookie: cppwd=$passwd" --data "$(cat "/dev/shm/$salt.hs")" | tee /dev/shm/$salt.res
|
||||
|
||||
{
|
||||
{
|
||||
@@ -135,7 +135,7 @@ EOF
|
||||
cat /dev/shm/$salt.hs
|
||||
} |
|
||||
tee /dev/shm/$salt.hsb |
|
||||
ncat $target 1234 |
|
||||
ncat $target 3923 |
|
||||
tee /dev/shm/$salt.hs1r
|
||||
|
||||
wark="$(cat /dev/shm/$salt.hs1r | getwark)"
|
||||
@@ -190,7 +190,7 @@ EOF
|
||||
nchunk=$((nchunk+1))
|
||||
|
||||
done |
|
||||
ncat $target 1234 |
|
||||
ncat $target 3923 |
|
||||
tee /dev/shm/$salt.pr
|
||||
|
||||
t=$(date +%s.%N)
|
||||
@@ -201,7 +201,7 @@ t=$(date +%s.%N)
|
||||
|
||||
printf '\033[36m'
|
||||
|
||||
ncat $target 1234 < /dev/shm/$salt.hsb |
|
||||
ncat $target 3923 < /dev/shm/$salt.hsb |
|
||||
tee /dev/shm/$salt.hs2r |
|
||||
grep -E '"hash": ?\[ *\]'
|
||||
|
||||
|
||||
@@ -127,7 +127,7 @@ def main():
|
||||
"-c", metavar="PATH", type=str, action="append", help="add config file"
|
||||
)
|
||||
ap.add_argument("-i", metavar="IP", type=str, default="0.0.0.0", help="ip to bind")
|
||||
ap.add_argument("-p", metavar="PORT", type=int, default=1234, help="port to bind")
|
||||
ap.add_argument("-p", metavar="PORT", type=int, default=3923, help="port to bind")
|
||||
ap.add_argument("-nc", metavar="NUM", type=int, default=16, help="max num clients")
|
||||
ap.add_argument(
|
||||
"-j", metavar="CORES", type=int, default=1, help="max num cpu cores"
|
||||
@@ -137,6 +137,8 @@ def main():
|
||||
ap.add_argument("-q", action="store_true", help="quiet")
|
||||
ap.add_argument("-ed", action="store_true", help="enable ?dots")
|
||||
ap.add_argument("-nw", action="store_true", help="disable writes (benchmark)")
|
||||
ap.add_argument("-nih", action="store_true", help="no info hostname")
|
||||
ap.add_argument("-nid", action="store_true", help="no info disk-usage")
|
||||
al = ap.parse_args()
|
||||
|
||||
SvcHub(al).run()
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
# coding: utf-8
|
||||
|
||||
VERSION = (0, 4, 3)
|
||||
CODENAME = "NIH"
|
||||
BUILD_DT = (2020, 5, 17)
|
||||
VERSION = (0, 5, 1)
|
||||
CODENAME = "fuse jelly"
|
||||
BUILD_DT = (2020, 8, 17)
|
||||
|
||||
S_VERSION = ".".join(map(str, VERSION))
|
||||
S_BUILD_DT = "{0:04d}-{1:02d}-{2:02d}".format(*BUILD_DT)
|
||||
|
||||
@@ -135,9 +135,9 @@ class AuthSrv(object):
|
||||
self.warn_anonwrite = True
|
||||
|
||||
if WINDOWS:
|
||||
self.re_vol = re.compile(r"^([a-zA-Z]:[\\/][^:]*|[^:]*):([^:]*):(.*)")
|
||||
self.re_vol = re.compile(r"^([a-zA-Z]:[\\/][^:]*|[^:]*):([^:]*):(.*)$")
|
||||
else:
|
||||
self.re_vol = re.compile(r"^([^:]*):([^:]*):(.*)")
|
||||
self.re_vol = re.compile(r"^([^:]*):([^:]*):(.*)$")
|
||||
|
||||
self.mutex = threading.Lock()
|
||||
self.reload()
|
||||
@@ -220,12 +220,13 @@ class AuthSrv(object):
|
||||
if self.args.v:
|
||||
# list of src:dst:permset:permset:...
|
||||
# permset is [rwa]username
|
||||
for vol_match in [self.re_vol.match(x) for x in self.args.v]:
|
||||
try:
|
||||
src, dst, perms = vol_match.groups()
|
||||
except:
|
||||
raise Exception("invalid -v argument")
|
||||
for v_str in self.args.v:
|
||||
m = self.re_vol.match(v_str)
|
||||
if not m:
|
||||
raise Exception("invalid -v argument: [{}]".format(v_str))
|
||||
|
||||
src, dst, perms = m.groups()
|
||||
# print("\n".join([src, dst, perms]))
|
||||
src = fsdec(os.path.abspath(fsenc(src)))
|
||||
dst = dst.strip("/")
|
||||
mount[dst] = src
|
||||
|
||||
@@ -6,6 +6,8 @@ import stat
|
||||
import gzip
|
||||
import time
|
||||
import json
|
||||
import socket
|
||||
import ctypes
|
||||
from datetime import datetime
|
||||
import calendar
|
||||
|
||||
@@ -36,13 +38,13 @@ class HttpCli(object):
|
||||
|
||||
self.bufsz = 1024 * 32
|
||||
self.absolute_urls = False
|
||||
self.out_headers = {}
|
||||
self.out_headers = {"Access-Control-Allow-Origin": "*"}
|
||||
|
||||
def log(self, msg):
|
||||
self.log_func(self.log_src, msg)
|
||||
|
||||
def _check_nonfatal(self, ex):
|
||||
return ex.code in [403, 404]
|
||||
return ex.code in [404]
|
||||
|
||||
def _assert_safe_rem(self, rem):
|
||||
# sanity check to prevent any disasters
|
||||
@@ -128,6 +130,10 @@ class HttpCli(object):
|
||||
return self.handle_get() and self.keepalive
|
||||
elif self.mode == "POST":
|
||||
return self.handle_post() and self.keepalive
|
||||
elif self.mode == "PUT":
|
||||
return self.handle_put() and self.keepalive
|
||||
elif self.mode == "OPTIONS":
|
||||
return self.handle_options() and self.keepalive
|
||||
else:
|
||||
raise Pebkac(400, 'invalid HTTP mode "{0}"'.format(self.mode))
|
||||
|
||||
@@ -143,9 +149,7 @@ class HttpCli(object):
|
||||
def send_headers(self, length, status=200, mime=None, headers={}):
|
||||
response = ["HTTP/1.1 {} {}".format(status, HTTPCODE[status])]
|
||||
|
||||
if length is None:
|
||||
self.keepalive = False
|
||||
else:
|
||||
if length is not None:
|
||||
response.append("Content-Length: " + str(length))
|
||||
|
||||
# close if unknown length, otherwise take client's preference
|
||||
@@ -230,6 +234,30 @@ class HttpCli(object):
|
||||
|
||||
return self.tx_browser()
|
||||
|
||||
def handle_options(self):
    """Answer an OPTIONS (CORS preflight) request.

    Replies 204 No Content with fully permissive CORS headers and
    keeps the connection serviceable; always returns True.
    """
    self.log("OPTIONS " + self.req)
    self.send_headers(
        None,
        204,
        headers={
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Methods": "*",
            "Access-Control-Allow-Headers": "*",
        },
    )
    return True
|
||||
|
||||
def handle_put(self):
    """Handle an HTTP PUT by stashing the request body as a file.

    Honors "Expect: 100-continue" by sending the interim response
    before reading the body; raises Pebkac(400) if the client has
    already disconnected at that point.
    """
    self.log("PUT " + self.req)

    if self.headers.get("expect", "").lower() == "100-continue":
        try:
            self.s.sendall(b"HTTP/1.1 100 Continue\r\n\r\n")
        except:
            raise Pebkac(400, "client d/c before 100 continue")

    return self.handle_stash()
|
||||
|
||||
def handle_post(self):
|
||||
self.log("POST " + self.req)
|
||||
|
||||
@@ -243,6 +271,9 @@ class HttpCli(object):
|
||||
if not ctype:
|
||||
raise Pebkac(400, "you can't post without a content-type header")
|
||||
|
||||
if "raw" in self.uparam:
|
||||
return self.handle_stash()
|
||||
|
||||
if "multipart/form-data" in ctype:
|
||||
return self.handle_post_multipart()
|
||||
|
||||
@@ -255,6 +286,28 @@ class HttpCli(object):
|
||||
|
||||
raise Pebkac(405, "don't know how to handle {} POST".format(ctype))
|
||||
|
||||
def handle_stash(self):
    """Write the raw request body to an autogenerated file in the volume.

    The destination name encodes the arrival time and client address
    ("put-<ts>-<ip>.bin"); the reply is "<bytes-written>\\n<sha512-b64>\\n".
    Always returns True.
    """
    # NOTE(review): int(None) would raise TypeError, so the
    # "remains is None" branch below looks unreachable when the
    # content-length header is absent -- confirm intended behavior
    remains = int(self.headers.get("content-length", None))
    if remains is None:
        # unknown length: read until EOF and drop the connection after
        reader = read_socket_unbounded(self.sr)
        self.keepalive = False
    else:
        reader = read_socket(self.sr, remains)

    # map the virtual path to the real filesystem directory
    vfs, rem = self.conn.auth.vfs.get(self.vpath, self.uname, False, True)
    fdir = os.path.join(vfs.realpath, rem)

    # ":" is not filename-safe (ipv6 addresses), swap for "."
    addr = self.conn.addr[0].replace(":", ".")
    fn = "put-{:.6f}-{}.bin".format(time.time(), addr)
    path = os.path.join(fdir, fn)

    with open(path, "wb", 512 * 1024) as f:
        post_sz, _, sha_b64 = hashcopy(self.conn, reader, f)

    self.log("wrote {}/{} bytes to {}".format(post_sz, remains, path))
    self.reply("{}\n{}\n".format(post_sz, sha_b64).encode("utf-8"))
    return True
|
||||
|
||||
def handle_post_multipart(self):
|
||||
self.parser = MultipartParser(self.log, self.sr, self.headers)
|
||||
self.parser.parse()
|
||||
@@ -616,7 +669,16 @@ class HttpCli(object):
|
||||
|
||||
# if file exists, check that timestamp matches the client's
|
||||
if srv_lastmod >= 0:
|
||||
if cli_lastmod3 not in [-1, srv_lastmod3]:
|
||||
same_lastmod = cli_lastmod3 in [-1, srv_lastmod3]
|
||||
if not same_lastmod:
|
||||
# some filesystems/transports limit precision to 1sec, hopefully floored
|
||||
same_lastmod = (
|
||||
srv_lastmod == int(srv_lastmod)
|
||||
and cli_lastmod3 > srv_lastmod3
|
||||
and cli_lastmod3 - srv_lastmod3 < 1000
|
||||
)
|
||||
|
||||
if not same_lastmod:
|
||||
response = json.dumps(
|
||||
{
|
||||
"ok": False,
|
||||
@@ -804,6 +866,9 @@ class HttpCli(object):
|
||||
#
|
||||
# send reply
|
||||
|
||||
if not is_compressed:
|
||||
self.out_headers["Cache-Control"] = "no-cache"
|
||||
|
||||
self.out_headers["Accept-Ranges"] = "bytes"
|
||||
self.send_headers(
|
||||
length=upper - lower,
|
||||
@@ -1002,6 +1067,45 @@ class HttpCli(object):
|
||||
with open(fsenc(fn), "rb") as f:
|
||||
logues[n] = f.read().decode("utf-8")
|
||||
|
||||
if False:
|
||||
# this is a mistake
|
||||
md = None
|
||||
for fn in [x[2] for x in files]:
|
||||
if fn.lower() == "readme.md":
|
||||
fn = os.path.join(abspath, fn)
|
||||
with open(fn, "rb") as f:
|
||||
md = f.read().decode("utf-8")
|
||||
|
||||
break
|
||||
|
||||
srv_info = []
|
||||
|
||||
try:
|
||||
if not self.args.nih:
|
||||
srv_info.append(str(socket.gethostname()).split(".")[0])
|
||||
except:
|
||||
self.log("#wow #whoa")
|
||||
pass
|
||||
|
||||
try:
|
||||
# some fuses misbehave
|
||||
if not self.args.nid:
|
||||
if WINDOWS:
|
||||
bfree = ctypes.c_ulonglong(0)
|
||||
ctypes.windll.kernel32.GetDiskFreeSpaceExW(
|
||||
ctypes.c_wchar_p(abspath), None, None, ctypes.pointer(bfree)
|
||||
)
|
||||
srv_info.append(humansize(bfree.value) + " free")
|
||||
else:
|
||||
sv = os.statvfs(abspath)
|
||||
free = humansize(sv.f_frsize * sv.f_bfree, True)
|
||||
total = humansize(sv.f_frsize * sv.f_blocks, True)
|
||||
|
||||
srv_info.append(free + " free")
|
||||
srv_info.append(total)
|
||||
except:
|
||||
pass
|
||||
|
||||
ts = ""
|
||||
# ts = "?{}".format(time.time())
|
||||
|
||||
@@ -1016,6 +1120,7 @@ class HttpCli(object):
|
||||
prologue=logues[0],
|
||||
epilogue=logues[1],
|
||||
title=html_escape(self.vpath, quote=False),
|
||||
srv_info="</span> /// <span>".join(srv_info),
|
||||
)
|
||||
self.reply(html.encode("utf-8", "replace"))
|
||||
return True
|
||||
|
||||
@@ -86,7 +86,7 @@ class HttpConn(object):
|
||||
self.s.send(b"HTTP/1.1 400 Bad Request\r\n\r\n" + err.encode("utf-8"))
|
||||
return
|
||||
|
||||
if method not in [None, b"GET ", b"HEAD", b"POST"]:
|
||||
if method not in [None, b"GET ", b"HEAD", b"POST", b"PUT ", b"OPTI"]:
|
||||
if self.sr:
|
||||
self.log("\033[1;31mTODO: cannot do https in jython\033[0m")
|
||||
return
|
||||
|
||||
@@ -42,6 +42,7 @@ if WINDOWS and PY2:
|
||||
|
||||
HTTPCODE = {
|
||||
200: "OK",
|
||||
204: "No Content",
|
||||
206: "Partial Content",
|
||||
304: "Not Modified",
|
||||
400: "Bad Request",
|
||||
@@ -333,6 +334,21 @@ def read_header(sr):
|
||||
return ret[:ofs].decode("utf-8", "surrogateescape").split("\r\n")
|
||||
|
||||
|
||||
def humansize(sz, terse=False):
    """Format a byte count as a short human-readable string.

    Divides by 1024 until the value drops below 1024, keeping at most
    four characters of the number.  With terse=True the unit is
    compacted ("1.0 KiB" -> "1.0K").
    """
    units = ['B', 'KiB', 'MiB', 'GiB', 'TiB']
    for unit in units:
        if sz < 1024:
            break

        sz /= 1024

    num = str(sz)[:4].rstrip('.')
    ret = ' '.join([num, unit])

    if terse:
        return ret.replace('iB', '').replace(' ', '')

    return ret
|
||||
|
||||
|
||||
def undot(path):
|
||||
ret = []
|
||||
for node in path.split("/"):
|
||||
@@ -445,6 +461,15 @@ def read_socket(sr, total_size):
|
||||
yield buf
|
||||
|
||||
|
||||
def read_socket_unbounded(sr):
    """Yield 32 KiB chunks from sr until the peer closes the connection."""
    for buf in iter(lambda: sr.recv(32 * 1024), b""):
        yield buf
|
||||
|
||||
|
||||
def hashcopy(actor, fin, fout):
|
||||
u32_lim = int((2 ** 31) * 0.9)
|
||||
hashobj = hashlib.sha512()
|
||||
|
||||
@@ -131,6 +131,17 @@ a {
|
||||
.logue {
|
||||
padding: .2em 1.5em;
|
||||
}
|
||||
#srv_info {
|
||||
opacity: .5;
|
||||
font-size: .8em;
|
||||
color: #fc5;
|
||||
position: absolute;
|
||||
top: .5em;
|
||||
left: 2em;
|
||||
}
|
||||
#srv_info span {
|
||||
color: #fff;
|
||||
}
|
||||
a.play {
|
||||
color: #e70;
|
||||
}
|
||||
|
||||
@@ -53,6 +53,10 @@
|
||||
|
||||
<h2><a href="?h">control-panel</a></h2>
|
||||
|
||||
{%- if srv_info %}
|
||||
<div id="srv_info"><span>{{ srv_info }}</span></div>
|
||||
{%- endif %}
|
||||
|
||||
<div id="widget">
|
||||
<div id="wtoggle">♫</div>
|
||||
<div id="widgeti">
|
||||
|
||||
@@ -26,7 +26,7 @@
|
||||
</div>
|
||||
<div id="toc"></div>
|
||||
<div id="mtw">
|
||||
<textarea id="mt">{{ md }}</textarea>
|
||||
<textarea id="mt" autocomplete="off">{{ md }}</textarea>
|
||||
</div>
|
||||
<div id="mw">
|
||||
<div id="ml">
|
||||
@@ -40,7 +40,7 @@
|
||||
|
||||
{%- if edit %}
|
||||
<div id="helpbox">
|
||||
<textarea>
|
||||
<textarea autocomplete="off">
|
||||
|
||||
write markdown (most html is 🙆 too)
|
||||
|
||||
|
||||
@@ -221,7 +221,7 @@ function save(e) {
|
||||
save_cls = save_btn.getAttribute('class') + '';
|
||||
|
||||
if (save_cls.indexOf('disabled') >= 0) {
|
||||
alert('there is nothing to save');
|
||||
toast('font-size:2em;color:#fc6;width:9em;', 'no changes');
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -238,7 +238,7 @@ function save(e) {
|
||||
fd.append("lastmod", (force ? -1 : last_modified));
|
||||
fd.append("body", txt);
|
||||
|
||||
var url = (document.location + '').split('?')[0] + '?raw';
|
||||
var url = (document.location + '').split('?')[0];
|
||||
var xhr = new XMLHttpRequest();
|
||||
xhr.open('POST', url, true);
|
||||
xhr.responseType = 'text';
|
||||
@@ -289,19 +289,24 @@ function save_cb() {
|
||||
this.btn.classList.remove('force-save');
|
||||
//alert('save OK -- wrote ' + r.size + ' bytes.\n\nsha512: ' + r.sha512);
|
||||
|
||||
run_savechk(r.lastmod, this.txt, this.btn, 0);
|
||||
}
|
||||
|
||||
function run_savechk(lastmod, txt, btn, ntry) {
|
||||
// download the saved doc from the server and compare
|
||||
var url = (document.location + '').split('?')[0] + '?raw';
|
||||
var url = (document.location + '').split('?')[0] + '?raw&_=' + new Date().getTime();
|
||||
var xhr = new XMLHttpRequest();
|
||||
xhr.open('GET', url, true);
|
||||
xhr.responseType = 'text';
|
||||
xhr.onreadystatechange = save_chk;
|
||||
xhr.btn = this.save_btn;
|
||||
xhr.txt = this.txt;
|
||||
xhr.lastmod = r.lastmod;
|
||||
xhr.onreadystatechange = savechk_cb;
|
||||
xhr.lastmod = lastmod;
|
||||
xhr.txt = txt;
|
||||
xhr.btn = btn;
|
||||
xhr.ntry = ntry;
|
||||
xhr.send();
|
||||
}
|
||||
|
||||
function save_chk() {
|
||||
function savechk_cb() {
|
||||
if (this.readyState != XMLHttpRequest.DONE)
|
||||
return;
|
||||
|
||||
@@ -313,6 +318,14 @@ function save_chk() {
|
||||
var doc1 = this.txt.replace(/\r\n/g, "\n");
|
||||
var doc2 = this.responseText.replace(/\r\n/g, "\n");
|
||||
if (doc1 != doc2) {
|
||||
var that = this;
|
||||
if (that.ntry < 10) {
|
||||
// qnap funny, try a few more times
|
||||
setTimeout(function () {
|
||||
run_savechk(that.lastmod, that.txt, that.btn, that.ntry + 1)
|
||||
}, 100);
|
||||
return;
|
||||
}
|
||||
alert(
|
||||
'Error! The document on the server does not appear to have saved correctly (your editor contents and the server copy is not identical). Place the document on your clipboard for now and check the server logs for hints\n\n' +
|
||||
'Length: yours=' + doc1.length + ', server=' + doc2.length
|
||||
@@ -325,10 +338,15 @@ function save_chk() {
|
||||
last_modified = this.lastmod;
|
||||
server_md = this.txt;
|
||||
draw_md();
|
||||
toast('font-size:6em;font-family:serif;color:#cf6;width:4em;',
|
||||
'OK✔️<span style="font-size:.2em;color:#999">' + this.ntry + '</span>');
|
||||
}
|
||||
|
||||
function toast(style, msg) {
|
||||
var ok = document.createElement('div');
|
||||
ok.setAttribute('style', 'font-size:6em;font-family:serif;font-weight:bold;color:#cf6;background:#444;border-radius:.3em;padding:.6em 0;position:fixed;top:30%;left:calc(50% - 2em);width:4em;text-align:center;z-index:9001;transition:opacity 0.2s ease-in-out;opacity:1');
|
||||
ok.innerHTML = 'OK✔️';
|
||||
style += 'font-weight:bold;background:#444;border-radius:.3em;padding:.6em 0;position:fixed;top:30%;left:calc(50% - 2em);text-align:center;z-index:9001;transition:opacity 0.2s ease-in-out;opacity:1';
|
||||
ok.setAttribute('style', style);
|
||||
ok.innerHTML = msg;
|
||||
var parent = document.getElementById('m');
|
||||
document.documentElement.appendChild(ok);
|
||||
setTimeout(function () {
|
||||
@@ -520,6 +538,30 @@ function md_backspace() {
|
||||
}
|
||||
|
||||
|
||||
// paragraph jump: move the caret in the editor textarea to the start of
// the next (down=true) or previous (down=false) paragraph, where
// paragraphs are separated by blank lines ("\n\n")
function md_p_jump(down) {
    var ofs = dom_src.selectionStart;
    var txt = dom_src.value;

    if (down) {
        // step back off any newline run under the caret, then find the
        // next blank-line separator (or stop at the end of the text)
        while (txt[ofs] == '\n' && --ofs > 0);
        ofs = txt.indexOf("\n\n", ofs);
        if (ofs < 0)
            ofs = txt.length - 1;

        // land on the first character of the following paragraph
        while (txt[ofs] == '\n' && ++ofs < txt.length - 1);
    }
    else {
        // pad the end so lastIndexOf always has a separator to find,
        // back out of the current newline run, then search backwards
        txt += '\n\n';
        while (ofs > 1 && txt[ofs - 1] == '\n') ofs--;
        ofs = Math.max(0, txt.lastIndexOf("\n\n", ofs - 1));
        while (txt[ofs] == '\n' && ++ofs < txt.length - 1);
    }

    dom_src.setSelectionRange(ofs, ofs, "none");
}
|
||||
|
||||
|
||||
// hotkeys / toolbar
|
||||
(function () {
|
||||
function keydown(ev) {
|
||||
@@ -531,6 +573,11 @@ function md_backspace() {
|
||||
save();
|
||||
return false;
|
||||
}
|
||||
if (ev.code == "Escape" || kc == 27) {
|
||||
var d = document.getElementById('helpclose');
|
||||
if (d)
|
||||
d.click();
|
||||
}
|
||||
if (document.activeElement == dom_src) {
|
||||
if (ev.code == "Tab" || kc == 9) {
|
||||
md_indent(ev.shiftKey);
|
||||
@@ -562,6 +609,12 @@ function md_backspace() {
|
||||
if (!ctrl && !ev.shiftKey && kc == 8) {
|
||||
return md_backspace();
|
||||
}
|
||||
var up = ev.code == "ArrowUp" || kc == 38;
|
||||
var dn = ev.code == "ArrowDown" || kc == 40;
|
||||
if (ctrl && (up || dn)) {
|
||||
md_p_jump(dn);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
document.onkeydown = keydown;
|
||||
@@ -714,4 +767,4 @@ document.getElementById('help').onclick = function () {
|
||||
if (c1[a] !== c2[a])
|
||||
console.log(c1[a] + '\n' + c2[a]);
|
||||
}
|
||||
*/
|
||||
*/
|
||||
|
||||
@@ -17,7 +17,7 @@
|
||||
</div>
|
||||
</div>
|
||||
<div id="m">
|
||||
<textarea id="mt" style="display:none">{{ md }}</textarea>
|
||||
<textarea id="mt" style="display:none" autocomplete="off">{{ md }}</textarea>
|
||||
</div>
|
||||
</div>
|
||||
<script>
|
||||
|
||||
@@ -13,7 +13,7 @@ head -c $((2*1024*1024*1024)) /dev/zero | openssl enc -aes-256-ctr -pass pass:hu
|
||||
## testing multiple parallel uploads
|
||||
## usage: para | tee log
|
||||
|
||||
para() { for s in 1 2 3 4 5 6 7 8 12 16 24 32 48 64; do echo $s; for r in {1..4}; do for ((n=0;n<s;n++)); do curl -sF "act=bput" -F "f=@garbage.file" http://127.0.0.1:1234/ 2>&1 & done; wait; echo; done; done; }
|
||||
para() { for s in 1 2 3 4 5 6 7 8 12 16 24 32 48 64; do echo $s; for r in {1..4}; do for ((n=0;n<s;n++)); do curl -sF "act=bput" -F "f=@garbage.file" http://127.0.0.1:3923/ 2>&1 & done; wait; echo; done; done; }
|
||||
|
||||
|
||||
##
|
||||
@@ -36,13 +36,13 @@ for dir in "${dirs[@]}"; do for fn in ふが "$(printf \\xed\\x93)" 'qwe,rty;asd
|
||||
|
||||
fn=$(printf '\xba\xdc\xab.cab')
|
||||
echo asdf > "$fn"
|
||||
curl --cookie cppwd=wark -sF "act=bput" -F "f=@$fn" http://127.0.0.1:1234/moji/%ED%91/
|
||||
curl --cookie cppwd=wark -sF "act=bput" -F "f=@$fn" http://127.0.0.1:3923/moji/%ED%91/
|
||||
|
||||
|
||||
##
|
||||
## test compression
|
||||
|
||||
wget -S --header='Accept-Encoding: gzip' -U 'MSIE 6.0; SV1' http://127.0.0.1:1234/.cpr/deps/ogv.js -O- | md5sum; p=~ed/dev/copyparty/copyparty/web/deps/ogv.js.gz; md5sum $p; gzip -d < $p | md5sum
|
||||
wget -S --header='Accept-Encoding: gzip' -U 'MSIE 6.0; SV1' http://127.0.0.1:3923/.cpr/deps/ogv.js -O- | md5sum; p=~ed/dev/copyparty/copyparty/web/deps/ogv.js.gz; md5sum $p; gzip -d < $p | md5sum
|
||||
|
||||
|
||||
##
|
||||
@@ -113,3 +113,12 @@ function convert_markdown(md_text, dest_dom) {
|
||||
var end = tsh.slice(-2);
|
||||
console.log("render", end.pop() - end.pop(), (tsh[tsh.length - 1] - tsh[0]) / (tsh.length - 1));
|
||||
}
|
||||
|
||||
|
||||
##
|
||||
## tmpfiles.d meme
|
||||
|
||||
mk() { rm -rf /tmp/foo; sudo -u ed bash -c 'mkdir /tmp/foo; echo hi > /tmp/foo/bar'; }
|
||||
mk && t0="$(date)" && while true; do date -s "$(date '+ 1 hour')"; systemd-tmpfiles --clean; ls -1 /tmp | grep foo || break; done; echo "$t0"
|
||||
mk && sudo -u ed flock /tmp/foo sleep 40 & sleep 1; ps aux | grep -E 'sleep 40$' && t0="$(date)" && for n in {1..40}; do date -s "$(date '+ 1 day')"; systemd-tmpfiles --clean; ls -1 /tmp | grep foo || break; done; echo "$t0"
|
||||
mk && t0="$(date)" && for n in {1..40}; do date -s "$(date '+ 1 day')"; systemd-tmpfiles --clean; ls -1 /tmp | grep foo || break; tar -cf/dev/null /tmp/foo; done; echo "$t0"
|
||||
|
||||
35
docs/pretend-youre-qnap.patch
Normal file
35
docs/pretend-youre-qnap.patch
Normal file
@@ -0,0 +1,35 @@
|
||||
diff --git a/copyparty/httpcli.py b/copyparty/httpcli.py
|
||||
index 2d3c1ad..e1e85a0 100644
|
||||
--- a/copyparty/httpcli.py
|
||||
+++ b/copyparty/httpcli.py
|
||||
@@ -864,6 +864,30 @@ class HttpCli(object):
|
||||
#
|
||||
# send reply
|
||||
|
||||
+ try:
|
||||
+ fakefn = self.conn.hsrv.fakefn
|
||||
+ fakectr = self.conn.hsrv.fakectr
|
||||
+ fakedata = self.conn.hsrv.fakedata
|
||||
+ except:
|
||||
+ fakefn = b''
|
||||
+ fakectr = 0
|
||||
+ fakedata = b''
|
||||
+
|
||||
+ self.log('\n{} {}\n{}'.format(fakefn, fakectr, open_args[0]))
|
||||
+ if fakefn == open_args[0] and fakectr > 0:
|
||||
+ self.reply(fakedata, mime=guess_mime(req_path)[0])
|
||||
+ self.conn.hsrv.fakectr = fakectr - 1
|
||||
+ else:
|
||||
+ with open_func(*open_args) as f:
|
||||
+ fakedata = f.read()
|
||||
+
|
||||
+ self.conn.hsrv.fakefn = open_args[0]
|
||||
+ self.conn.hsrv.fakedata = fakedata
|
||||
+ self.conn.hsrv.fakectr = 15
|
||||
+ self.reply(fakedata, mime=guess_mime(req_path)[0])
|
||||
+
|
||||
+ return True
|
||||
+
|
||||
self.out_headers["Accept-Ranges"] = "bytes"
|
||||
self.send_headers(
|
||||
length=upper - lower,
|
||||
100
scripts/fusefuzz.py
Executable file
@@ -0,0 +1,100 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import os
|
||||
import time
|
||||
|
||||
"""
|
||||
mkdir -p /dev/shm/fusefuzz/{r,v}
|
||||
PYTHONPATH=.. python3 -m copyparty -v /dev/shm/fusefuzz/r::r -i 127.0.0.1
|
||||
../bin/copyparty-fuse.py /dev/shm/fusefuzz/v http://127.0.0.1:3923/ 2 0
|
||||
(d="$PWD"; cd /dev/shm/fusefuzz && "$d"/fusefuzz.py)
|
||||
"""
|
||||
|
||||
|
||||
def chk(fsz, rsz, ofs0, shift, ofs, rf, vf):
    """Read rsz bytes at offset ofs from both the real file (rf) and the
    fuse-mounted view of it (vf) and verify the two reads match.

    Returns the (real, virtual) buffer pair; on mismatch, dumps both
    buffers to ./buf.0 and ./buf.1 for inspection and raises.
    """
    # only seek when the previous read did not already leave us at ofs,
    # so consecutive reads exercise the mount's sequential-read path
    if rf.tell() != ofs:
        rf.seek(ofs)
        vf.seek(ofs)

    real_buf = rf.read(rsz)
    virt_buf = vf.read(rsz)

    print(f"fsz {fsz} rsz {rsz} ofs {ofs0} shift {shift} ofs {ofs} = {len(real_buf)}")

    if real_buf != virt_buf:
        # keep both sides on disk for offline diffing before bailing
        for idx, data in enumerate((real_buf, virt_buf)):
            with open("buf." + str(idx), "wb") as dump:
                dump.write(data)

        raise Exception(f"{len(real_buf)} != {len(virt_buf)}")

    return real_buf, virt_buf
|
||||
|
||||
|
||||
def main():
    """Fuzz a copyparty-fuse mount: compare reads of r/f (real) against
    v/f (mounted) across file sizes, read sizes, offsets and drifts.
    """
    mnt = "v"

    # a handful of tiny files so the mount has dentries to churn
    for size in range(5):
        with open(f"r/{size}", "wb") as f:
            f.write(b"h" * size)

    noise = os.urandom(7919)  # prime
    for fsz in range(1024 * 1024 * 2 - 3, 1024 * 1024 * 2 + 3):
        # fill r/f with fsz bytes of repeating noise
        with open("r/f", "wb", fsz) as f:
            f.write((noise * int(fsz / len(noise) + 1))[:fsz])

        for rsz in range(64 * 1024 - 2, 64 * 1024 + 2):
            # probe near the start, the end, and one/two reads before the end
            offsets = [0, 1, 2]
            for d in range(3):
                offsets += [fsz - d, fsz - (rsz * 1 + d), fsz - (rsz * 2 + d)]

            for ofs0 in offsets:
                for shift in range(-3, 3):
                    print(f"fsz {fsz} rsz {rsz} ofs {ofs0} shift {shift}")
                    ofs = ofs0
                    if not 0 <= ofs < fsz:
                        continue

                    # touch the tiny files to stir the dentry cache
                    for fn in range(1, 3):
                        with open(f"{mnt}/{fn}", "rb") as f:
                            f.read()

                    # forward pass: drift the offset by `shift` after each read
                    prev_ofs = -99
                    with open("r/f", "rb", rsz) as raw_f:
                        with open(f"{mnt}/f", "rb", rsz) as fuse_f:
                            while True:
                                ofs += shift
                                if ofs < 0 or ofs > fsz or ofs == prev_ofs:
                                    break

                                prev_ofs = ofs
                                rb, vb = chk(fsz, rsz, ofs0, shift, ofs, raw_f, fuse_f)
                                if not rb:
                                    break

                                ofs += len(rb)

                    for fn in range(1, 3):
                        with open(f"{mnt}/{fn}", "rb") as f:
                            f.read()

                    # backward pass: two reads stepping back rsz each time
                    with open("r/f", "rb", rsz) as raw_f:
                        with open(f"{mnt}/f", "rb", rsz) as fuse_f:
                            for _ in range(2):
                                ofs += shift
                                if ofs < 0 or ofs > fsz:
                                    break

                                rb, vb = chk(fsz, rsz, ofs0, shift, ofs, raw_f, fuse_f)
                                ofs -= rsz

        # bumping fsz, sleep away the dentry cache in cppf
        time.sleep(1)


if __name__ == "__main__":
    main()
|
||||
@@ -426,6 +426,16 @@ def run(tmp, py):
|
||||
msg("will use:", py)
|
||||
msg("bound to:", tmp)
|
||||
|
||||
# "systemd-tmpfiles-clean.timer"?? HOW do you even come up with this shit
|
||||
try:
|
||||
import fcntl
|
||||
|
||||
fd = os.open(tmp, os.O_RDONLY)
|
||||
fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
|
||||
tmp = os.readlink(tmp) # can't flock a symlink, even with O_NOFOLLOW
|
||||
except:
|
||||
pass
|
||||
|
||||
fp_py = os.path.join(tmp, "py")
|
||||
with open(fp_py, "wb") as f:
|
||||
f.write(py.encode("utf-8") + b"\n")
|
||||
|
||||
Reference in New Issue
Block a user