mirror of https://github.com/9001/copyparty.git (synced 2025-10-25 00:53:47 +00:00)

Compare commits

12 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 482dd7a938 | |
| | bddcc69438 | |
| | 19d4540630 | |
| | 4f5f6c81f5 | |
| | 7e4c1238ba | |
| | f7196ac773 | |
| | 7a7c832000 | |
| | 2b4ccdbebb | |
| | 0d16b49489 | |
| | 768405b691 | |
| | da01413b7b | |
| | 914e22c53e | |
@@ -243,7 +243,8 @@ def main():
ap.add_argument("-nw", action="store_true", help="disable writes (benchmark)")
ap.add_argument("-nih", action="store_true", help="no info hostname")
ap.add_argument("-nid", action="store_true", help="no info disk-usage")
ap.add_argument("--no-sendfile", action="store_true", help="disable sendfile")
ap.add_argument("--no-sendfile", action="store_true", help="disable sendfile (for debugging)")
ap.add_argument("--no-scandir", action="store_true", help="disable scandir (for debugging)")
ap.add_argument("--urlform", type=str, default="print,get", help="how to handle url-forms")
ap.add_argument("--salt", type=str, default="hunter2", help="up2k file-hash salt")

@@ -255,6 +256,7 @@ def main():
ap2.add_argument("-e2ts", action="store_true", help="enable metadata scanner, sets -e2t")
ap2.add_argument("-e2tsr", action="store_true", help="rescan all metadata, sets -e2ts")
ap2.add_argument("--no-mutagen", action="store_true", help="use ffprobe for tags instead")
ap2.add_argument("--no-mtag-mt", action="store_true", help="disable tag-read parallelism")
ap2.add_argument("-mtm", metavar="M=t,t,t", action="append", type=str, help="add/replace metadata mapping")
ap2.add_argument("-mte", metavar="M,M,M", type=str, help="tags to index/display (comma-sep.)",
default="circle,album,.tn,artist,title,.bpm,key,.dur,.q")

@@ -1,8 +1,8 @@
# coding: utf-8

VERSION = (0, 9, 1)
VERSION = (0, 9, 3)
CODENAME = "the strongest music server"
BUILD_DT = (2021, 3, 3)
BUILD_DT = (2021, 3, 4)

S_VERSION = ".".join(map(str, VERSION))
S_BUILD_DT = "{0:04d}-{1:02d}-{2:02d}".format(*BUILD_DT)

@@ -6,7 +6,7 @@ import re
import threading

from .__init__ import PY2, WINDOWS
from .util import undot, Pebkac, fsdec, fsenc
from .util import undot, Pebkac, fsdec, fsenc, statdir


class VFS(object):

@@ -102,12 +102,11 @@ class VFS(object):

return fsdec(os.path.realpath(fsenc(rp)))

def ls(self, rem, uname):
def ls(self, rem, uname, scandir, lstat=False):
"""return user-readable [fsdir,real,virt] items at vpath"""
virt_vis = {} # nodes readable by user
abspath = self.canonical(rem)
items = os.listdir(fsenc(abspath))
real = [fsdec(x) for x in items]
real = list(statdir(print, scandir, lstat, abspath))
real.sort()
if not rem:
for name, vn2 in sorted(self.nodes.items()):

@@ -115,7 +114,7 @@ class VFS(object):
virt_vis[name] = vn2

# no vfs nodes in the list of real inodes
real = [x for x in real if x not in self.nodes]
real = [x for x in real if x[0] not in self.nodes]

return [abspath, real, virt_vis]
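Note: with this change `VFS.ls()` returns `[name, stat_result]` pairs (from the new `statdir()` helper) instead of bare filenames, which is why the filter above switches from `x` to `x[0]`. A minimal standalone sketch of how callers unpack the new shape, mirroring the `HttpCli` hunks further down (not copyparty API, just the same list comprehension idioms):

```python
import os
import stat

# stand-in for the [name, stat_result] pairs that VFS.ls() now returns
vfs_ls = [[e.name, e.stat()] for e in os.scandir(".")]

stats = {k: v for k, v in vfs_ls}   # name -> stat_result lookup, as in the listing hunk
names = [x[0] for x in vfs_ls]      # old shape: bare filenames
dirs = [x[0] for x in vfs_ls if stat.S_ISDIR(x[1].st_mode)]
print(dirs)
```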

@@ -315,7 +314,7 @@ class AuthSrv(object):
if (self.args.e2ds and vol.uwrite) or self.args.e2dsa:
vol.flags["e2ds"] = True

if self.args.e2d:
if self.args.e2d or "e2ds" in vol.flags:
vol.flags["e2d"] = True

for k in ["e2t", "e2ts", "e2tsr"]:

@@ -345,6 +345,10 @@ class HttpCli(object):
with open(path, "wb", 512 * 1024) as f:
post_sz, _, sha_b64 = hashcopy(self.conn, reader, f)

self.conn.hsrv.broker.put(
False, "up2k.hash_file", vfs.realpath, vfs.flags, rem, fn
)

return post_sz, sha_b64, remains, path

def handle_stash(self):

@@ -675,6 +679,9 @@ class HttpCli(object):
raise Pebkac(400, "empty files in post")

files.append([sz, sha512_hex])
self.conn.hsrv.broker.put(
False, "up2k.hash_file", vfs.realpath, vfs.flags, rem, fname
)
self.conn.nbyte += sz

except Pebkac:

@@ -1112,7 +1119,7 @@ class HttpCli(object):

try:
vn, rem = self.auth.vfs.get(top, self.uname, True, False)
fsroot, vfs_ls, vfs_virt = vn.ls(rem, self.uname)
fsroot, vfs_ls, vfs_virt = vn.ls(rem, self.uname, not self.args.no_scandir)
except:
vfs_ls = []
vfs_virt = {}

@@ -1123,13 +1130,13 @@ class HttpCli(object):

dirs = []

vfs_ls = [x[0] for x in vfs_ls if stat.S_ISDIR(x[1].st_mode)]

if not self.args.ed or "dots" not in self.uparam:
vfs_ls = exclude_dotfiles(vfs_ls)

for fn in [x for x in vfs_ls if x != excl]:
abspath = os.path.join(fsroot, fn)
if os.path.isdir(abspath):
dirs.append(fn)
dirs.append(fn)

for x in vfs_virt.keys():
if x != excl:

@@ -1168,7 +1175,9 @@ class HttpCli(object):

return self.tx_file(abspath)

fsroot, vfs_ls, vfs_virt = vn.ls(rem, self.uname)
fsroot, vfs_ls, vfs_virt = vn.ls(rem, self.uname, not self.args.no_scandir)
stats = {k: v for k, v in vfs_ls}
vfs_ls = [x[0] for x in vfs_ls]
vfs_ls.extend(vfs_virt.keys())

# check for old versions of files,

@@ -1219,7 +1228,7 @@ class HttpCli(object):
fspath = fsroot + "/" + fn

try:
inf = os.stat(fsenc(fspath))
inf = stats.get(fn) or os.stat(fsenc(fspath))
except:
self.log("broken symlink: {}".format(repr(fspath)))
continue

@@ -1251,7 +1260,7 @@ class HttpCli(object):
"sz": sz,
"ext": ext,
"dt": dt,
"ts": inf.st_mtime,
"ts": int(inf.st_mtime),
}
if is_dir:
dirs.append(item)
@@ -1,6 +1,5 @@
# coding: utf-8
from __future__ import print_function, unicode_literals
from math import fabs

import re
import os

@@ -16,19 +15,21 @@ class MTag(object):
def __init__(self, log_func, args):
self.log_func = log_func
self.usable = True
self.prefer_mt = False
mappings = args.mtm
backend = "ffprobe" if args.no_mutagen else "mutagen"
self.backend = "ffprobe" if args.no_mutagen else "mutagen"

if backend == "mutagen":
if self.backend == "mutagen":
self.get = self.get_mutagen
try:
import mutagen
except:
self.log("\033[33mcould not load mutagen, trying ffprobe instead")
backend = "ffprobe"
self.backend = "ffprobe"

if backend == "ffprobe":
if self.backend == "ffprobe":
self.get = self.get_ffprobe
self.prefer_mt = True
# about 20x slower
if PY2:
cmd = ["ffprobe", "-version"]

@@ -3,7 +3,6 @@ from __future__ import print_function, unicode_literals

import re
import os
import sys
import time
import math
import json

@@ -28,6 +27,7 @@ from .util import (
atomic_move,
w8b64enc,
w8b64dec,
statdir,
)
from .mtag import MTag
from .authsrv import AuthSrv
@@ -51,17 +51,21 @@ class Up2k(object):
self.broker = broker
self.args = broker.args
self.log_func = broker.log
self.persist = self.args.e2d

# config
self.salt = broker.args.salt

# state
self.mutex = threading.Lock()
self.hashq = Queue()
self.tagq = Queue()
self.registry = {}
self.entags = {}
self.flags = {}
self.cur = {}
self.mtag = None
self.n_mtag_thr_alive = 0
self.n_mtag_tags_added = 0

self.mem_cur = None
if HAVE_SQLITE3:

@@ -76,25 +80,29 @@ class Up2k(object):
thr.daemon = True
thr.start()

self.mtag = MTag(self.log_func, self.args)
if not self.mtag.usable:
self.mtag = None

# static
self.r_hash = re.compile("^[0-9a-zA-Z_-]{43}$")

if self.persist and not HAVE_SQLITE3:
if not HAVE_SQLITE3:
self.log("could not initialize sqlite3, will use in-memory registry only")

# this is kinda jank
auth = AuthSrv(self.args, self.log, False)
self.init_indexes(auth)
have_e2d = self.init_indexes(auth)

if self.persist:
if have_e2d:
thr = threading.Thread(target=self._snapshot)
thr.daemon = True
thr.start()

thr = threading.Thread(target=self._tagger)
thr.daemon = True
thr.start()

thr = threading.Thread(target=self._hasher)
thr.daemon = True
thr.start()

def log(self, msg):
self.log_func("up2k", msg + "\033[K")

@@ -137,6 +145,7 @@ class Up2k(object):
self.pp = ProgressPrinter()
vols = auth.vfs.all_vols.values()
t0 = time.time()
have_e2d = False

live_vols = []
for vol in vols:

@@ -148,6 +157,16 @@ class Up2k(object):

vols = live_vols

need_mtag = False
for vol in auth.vfs.all_vols.values():
if "e2t" in vol.flags:
need_mtag = True

if need_mtag:
self.mtag = MTag(self.log_func, self.args)
if not self.mtag.usable:
self.mtag = None

# e2ds(a) volumes first,
# also covers tags where e2ts is set
for vol in vols:

@@ -157,6 +176,9 @@ class Up2k(object):

self.entags[vol.realpath] = en

if "e2d" in vol.flags:
have_e2d = True

if "e2ds" in vol.flags:
r = self._build_file_index(vol, vols)
if not r:

@@ -185,6 +207,8 @@ class Up2k(object):
msg = "\033[31mcould not read tags because no backends are available (mutagen or ffprobe)\033[0m"
self.log(msg)

return have_e2d

def register_vpath(self, ptop, flags):
with self.mutex:
if ptop in self.registry:

@@ -192,7 +216,7 @@ class Up2k(object):

reg = {}
path = os.path.join(ptop, ".hist", "up2k.snap")
if self.persist and os.path.exists(path):
if "e2d" in flags and os.path.exists(path):
with gzip.GzipFile(path, "rb") as f:
j = f.read().decode("utf-8")

@@ -206,7 +230,7 @@ class Up2k(object):

self.flags[ptop] = flags
self.registry[ptop] = reg
if not self.persist or not HAVE_SQLITE3 or "d2d" in flags:
if not HAVE_SQLITE3 or "e2d" not in flags or "d2d" in flags:
return None

try:

@@ -269,23 +293,12 @@ class Up2k(object):
self.log(msg)

def _build_dir(self, dbw, top, excl, cdir):
try:
inodes = [fsdec(x) for x in os.listdir(fsenc(cdir))]
except Exception as ex:
self.log("listdir: {} @ [{}]".format(repr(ex), cdir))
return 0

self.pp.msg = "a{} {}".format(self.pp.n, cdir)
histdir = os.path.join(top, ".hist")
ret = 0
for inode in inodes:
abspath = os.path.join(cdir, inode)
try:
inf = os.stat(fsenc(abspath))
except Exception as ex:
self.log("stat: {} @ [{}]".format(repr(ex), abspath))
continue

for iname, inf in statdir(self.log, not self.args.no_scandir, False, cdir):
abspath = os.path.join(cdir, iname)
lmod = int(inf.st_mtime)
if stat.S_ISDIR(inf.st_mode):
if abspath in excl or abspath == histdir:
continue

@@ -311,11 +324,11 @@ class Up2k(object):
self.log(m.format(top, rp, len(in_db), rep_db))
dts = -1

if dts == inf.st_mtime and dsz == inf.st_size:
if dts == lmod and dsz == inf.st_size:
continue

m = "reindex [{}] => [{}] ({}/{}) ({}/{})".format(
top, rp, dts, inf.st_mtime, dsz, inf.st_size
top, rp, dts, lmod, dsz, inf.st_size
)
self.log(m)
self.db_rm(dbw[0], rd, fn)

@@ -334,7 +347,7 @@ class Up2k(object):
continue

wark = up2k_wark_from_hashlist(self.salt, inf.st_size, hashes)
self.db_add(dbw[0], wark, rd, fn, inf.st_mtime, inf.st_size)
self.db_add(dbw[0], wark, rd, fn, lmod, inf.st_size)
dbw[1] += 1
ret += 1
td = time.time() - dbw[2]

@@ -415,7 +428,24 @@ class Up2k(object):
if not self.mtag:
return n_add, n_rm, False

mpool = False
if self.mtag.prefer_mt and not self.args.no_mtag_mt:
# mp.pool.ThreadPool and concurrent.futures.ThreadPoolExecutor
# both do crazy runahead so lets reinvent another wheel
nw = os.cpu_count()
if not self.n_mtag_thr_alive:
msg = 'using {} cores for tag reader "{}"'
self.log(msg.format(nw, self.mtag.backend))

self.n_mtag_thr_alive = nw
mpool = Queue(nw)
for _ in range(nw):
thr = threading.Thread(target=self._tag_thr, args=(mpool,))
thr.daemon = True
thr.start()

c2 = cur.connection.cursor()
c3 = cur.connection.cursor()
n_left = cur.execute("select count(w) from up").fetchone()[0]
for w, rd, fn in cur.execute("select w, rd, fn from up"):
n_left -= 1

@@ -425,17 +455,17 @@ class Up2k(object):

abspath = os.path.join(ptop, rd, fn)
self.pp.msg = "c{} {}".format(n_left, abspath)
tags = self.mtag.get(abspath)
tags = {k: v for k, v in tags.items() if k in entags}
if not tags:
# indicate scanned without tags
tags = {"x": 0}
args = c3, entags, w, abspath
if not mpool:
n_tags = self._tag_file(*args)
else:
mpool.put(args)
with self.mutex:
n_tags = self.n_mtag_tags_added
self.n_mtag_tags_added = 0

for k, v in tags.items():
q = "insert into mt values (?,?,?)"
c2.execute(q, (w[:16], k, v))
n_add += 1
n_buf += 1
n_add += n_tags
n_buf += n_tags

td = time.time() - last_write
if n_buf >= 4096 or td >= 60:

@@ -444,10 +474,50 @@ class Up2k(object):
last_write = time.time()
n_buf = 0

if self.n_mtag_thr_alive:
mpool.join()
for _ in range(self.n_mtag_thr_alive):
mpool.put(None)

c3.close()
c2.close()

return n_add, n_rm, True

def _tag_thr(self, q):
while True:
task = q.get()
if not task:
break

try:
write_cur, entags, wark, abspath = task
tags = self.mtag.get(abspath)
with self.mutex:
n = self._tag_file(write_cur, entags, wark, abspath, tags)
self.n_mtag_tags_added += n
except:
with self.mutex:
self.n_mtag_thr_alive -= 1
raise
finally:
q.task_done()

def _tag_file(self, write_cur, entags, wark, abspath, tags=None):
tags = tags or self.mtag.get(abspath)
tags = {k: v for k, v in tags.items() if k in entags}
if not tags:
# indicate scanned without tags
tags = {"x": 0}

ret = 0
for k, v in tags.items():
q = "insert into mt values (?,?,?)"
write_cur.execute(q, (wark[:16], k, v))
ret += 1

return ret

def _orz(self, db_path):
return sqlite3.connect(db_path, check_same_thread=False).cursor()
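The comment in the hunk above ("both do crazy runahead") is the reason for the hand-rolled pool: a `Queue` bounded to the worker count throttles the producer instead of letting it queue every file ahead of the tag readers. A standalone sketch of that pattern with generic names (not the copyparty internals):

```python
import threading
from queue import Queue

def worker(q):
    while True:
        task = q.get()
        if task is None:      # sentinel: shut down this worker
            q.task_done()
            return
        try:
            print("processing", task)   # stand-in for _tag_file()
        finally:
            q.task_done()

nw = 4
q = Queue(nw)                 # bounded: put() blocks once nw tasks are waiting
for _ in range(nw):
    threading.Thread(target=worker, args=(q,), daemon=True).start()

for task in range(20):
    q.put(task)               # producer is throttled by the queue bound

q.join()                      # wait for the in-flight tasks
for _ in range(nw):
    q.put(None)               # then stop the workers, like the mpool.put(None) loop above
```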

@@ -779,17 +849,33 @@ class Up2k(object):
if WINDOWS:
self.lastmod_q.put([dst, (int(time.time()), int(job["lmod"]))])

cur = self.cur.get(job["ptop"], None)
if cur:
j = job
self.db_rm(cur, j["prel"], j["name"])
self.db_add(cur, j["wark"], j["prel"], j["name"], j["lmod"], j["size"])
cur.connection.commit()

# legit api sware 2 me mum
if self.idx_wark(
job["ptop"],
job["wark"],
job["prel"],
job["name"],
job["lmod"],
job["size"],
):
del self.registry[ptop][wark]
# in-memory registry is reserved for unfinished uploads

return ret, dst
return ret, dst

def idx_wark(self, ptop, wark, rd, fn, lmod, sz):
cur = self.cur.get(ptop, None)
if not cur:
return False

self.db_rm(cur, rd, fn)
self.db_add(cur, wark, rd, fn, int(lmod), sz)
cur.connection.commit()

if "e2t" in self.flags[ptop]:
self.tagq.put([ptop, wark, rd, fn])

return True

def db_rm(self, db, rd, fn):
sql = "delete from up where rd = ? and fn = ?"

@@ -940,6 +1026,45 @@ class Up2k(object):
self.log("snap: {} |{}|".format(path, len(reg.keys())))
prev[k] = etag

def _tagger(self):
while True:
ptop, wark, rd, fn = self.tagq.get()
abspath = os.path.join(ptop, rd, fn)
self.log("tagging " + abspath)
with self.mutex:
cur = self.cur[ptop]
if not cur:
self.log("\033[31mno cursor to write tags with??")
continue

entags = self.entags[ptop]
if not entags:
self.log("\033[33mno entags okay.jpg")
continue

if "e2t" in self.flags[ptop]:
self._tag_file(cur, entags, wark, abspath)

cur.connection.commit()

def _hasher(self):
while True:
ptop, rd, fn = self.hashq.get()
if "e2d" not in self.flags[ptop]:
continue

abspath = os.path.join(ptop, rd, fn)
self.log("hashing " + abspath)
inf = os.stat(fsenc(abspath))
hashes = self._hashlist_from_file(abspath)
wark = up2k_wark_from_hashlist(self.salt, inf.st_size, hashes)
with self.mutex:
self.idx_wark(ptop, wark, rd, fn, inf.st_mtime, inf.st_size)

def hash_file(self, ptop, flags, rd, fn):
self.register_vpath(ptop, flags)
self.hashq.put([ptop, rd, fn])


def up2k_chunksize(filesize):
chunksize = 1024 * 1024
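Taken together, the new `hash_file`, `_hasher`, `idx_wark` and `_tagger` methods form a two-stage background pipeline: finished uploads are queued onto `hashq`, hashed into a wark and indexed, and volumes flagged `e2t` are then handed to `tagq` for tag extraction. A schematic of the same two-queue hand-off with generic names (not the copyparty classes):

```python
import hashlib
import threading
from queue import Queue

hashq, tagq = Queue(), Queue()

def hasher():
    while True:
        path = hashq.get()
        wark = hashlib.sha512(path.encode()).hexdigest()  # stand-in for chunked file hashing
        tagq.put((path, wark))                            # hand off to the tagging stage
        hashq.task_done()

def tagger():
    while True:
        path, wark = tagq.get()
        print("tagging", path, wark[:16])                 # stand-in for _tag_file()
        tagq.task_done()

for fn in (hasher, tagger):
    threading.Thread(target=fn, daemon=True).start()

hashq.put("music/song.flac")   # roughly what hash_file() does per finished upload
hashq.join()
tagq.join()
```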

@@ -521,9 +521,7 @@ def u8safe(txt):


def exclude_dotfiles(filepaths):
for fpath in filepaths:
if not fpath.split("/")[-1].startswith("."):
yield fpath
return [x for x in filepaths if not x.split("/")[-1].startswith(".")]


def html_escape(s, quote=False):

@@ -726,6 +724,30 @@ def sendfile_kern(lower, upper, f, s):
return 0


def statdir(logger, scandir, lstat, top):
try:
btop = fsenc(top)
if scandir and hasattr(os, "scandir"):
src = "scandir"
with os.scandir(btop) as dh:
for fh in dh:
try:
yield [fsdec(fh.name), fh.stat(follow_symlinks=not lstat)]
except Exception as ex:
logger("scan-stat: {} @ {}".format(repr(ex), fsdec(fh.path)))
else:
src = "listdir"
fun = os.lstat if lstat else os.stat
for name in os.listdir(btop):
abspath = os.path.join(btop, name)
try:
yield [fsdec(name), fun(abspath)]
except Exception as ex:
logger("list-stat: {} @ {}".format(repr(ex), fsdec(abspath)))
except Exception as ex:
logger("{}: {} @ {}".format(src, repr(ex), top))


def unescape_cookie(orig):
# mw=idk; doot=qwe%2Crty%3Basd+fgh%2Bjkl%25zxc%26vbn # qwe,rty;asd fgh+jkl%zxc&vbn
ret = ""

@@ -67,16 +67,18 @@ a,
#files a:hover {
color: #fff;
background: #161616;
text-decoration: underline;
}
#files thead a {
color: #999;
font-weight: normal;
}
#files tr:hover {
#files tr+tr:hover {
background: #1c1c1c;
}
#files thead th {
padding: .5em 1.3em .3em 1.3em;
cursor: pointer;
}
#files thead th:last-child {
background: #444;

@@ -305,11 +307,11 @@ a,
width: calc(100% - 10.5em);
background: rgba(0,0,0,0.2);
}
@media (min-width: 100em) {
@media (min-width: 90em) {
#barpos,
#barbuf {
width: calc(100% - 24em);
left: 10em;
left: 9.8em;
top: .7em;
height: 1.6em;
bottom: auto;

@@ -448,12 +450,27 @@ input[type="checkbox"]:checked+label {
#tree {
padding-top: 2em;
}
#tree>a+a {
padding: .2em .4em;
font-size: 1.2em;
background: #2a2a2a;
box-shadow: 0 .1em .2em #222 inset;
border-radius: .3em;
margin: .2em;
position: relative;
top: -.2em;
}
#tree>a+a:hover {
background: #805;
}
#tree>a+a.on {
background: #fc4;
color: #400;
text-shadow: none;
}
#detree {
padding: .3em .5em;
font-size: 1.5em;
display: inline-block;
min-width: 12em;
width: 100%;
}
#treefiles #files tbody {
border-radius: 0 .7em 0 .7em;

@@ -474,20 +491,20 @@ input[type="checkbox"]:checked+label {
list-style: none;
white-space: nowrap;
}
#tree a.hl {
#treeul a.hl {
color: #400;
background: #fc4;
border-radius: .3em;
text-shadow: none;
}
#tree a {
#treeul a {
display: inline-block;
}
#tree a+a {
#treeul a+a {
width: calc(100% - 2em);
background: #333;
}
#tree a+a:hover {
#treeul a+a:hover {
background: #222;
color: #fff;
}

@@ -535,7 +552,7 @@ input[type="checkbox"]:checked+label {
#files>thead>tr>th.min span {
position: absolute;
transform: rotate(270deg);
background: linear-gradient(90deg, #222, #444);
background: linear-gradient(90deg, rgba(68,68,68,0), rgba(68,68,68,0.5) 70%, #444);
margin-left: -4.6em;
padding: .4em;
top: 5.4em;

@@ -554,4 +571,11 @@ input[type="checkbox"]:checked+label {
border-color: transparent;
color: #400;
text-shadow: none;
}
}
#files tr.play a {
color: inherit;
}
#files tr.play a:hover {
color: #300;
background: #fea;
}
@@ -48,6 +48,9 @@
<tr>
<td id="tree">
<a href="#" id="detree">🍞...</a>
<a href="#" step="2" id="twobytwo">+</a>
<a href="#" step="-2" id="twig">–</a>
<a href="#" id="dyntree">a</a>
<ul id="treeul"></ul>
</td>
<td id="treefiles"></td>

@@ -138,6 +138,9 @@ var pbar = (function () {
var grad = null;

r.drawbuf = function () {
if (!mp.au)
return;

var cs = getComputedStyle(r.bcan);
var sw = parseInt(cs['width']);
var sh = parseInt(cs['height']);

@@ -164,6 +167,9 @@ var pbar = (function () {
}
};
r.drawpos = function () {
if (!mp.au)
return;

var cs = getComputedStyle(r.bcan);
var sw = parseInt(cs['width']);
var sh = parseInt(cs['height']);

@@ -462,7 +468,7 @@ function play(tid, call_depth) {
o.setAttribute('id', 'thx_js');
if (window.history && history.replaceState) {
var nurl = (document.location + '').split('#')[0] + '#' + oid;
history.replaceState(ebi('files').innerHTML, nurl, nurl);
hist_replace(ebi('files').innerHTML, nurl);
}
else {
document.location.hash = oid;

@@ -721,6 +727,10 @@ function autoplay_blocked() {
// tree
(function () {
var treedata = null;
var dyn = bcfg_get('dyntree', true);
var treesz = icfg_get('treesz', 16);
treesz = isNaN(treesz) ? 16 : Math.min(Math.max(treesz, 4), 50);
console.log('treesz [' + treesz + ']');

function entree(e) {
ev(e);

@@ -779,7 +789,7 @@ function autoplay_blocked() {
esc(top) + '">' + esc(name) +
"</a>\n<ul>\n" + html + "</ul>";

var links = document.querySelectorAll('#tree a+a');
var links = document.querySelectorAll('#treeul a+a');
for (var a = 0, aa = links.length; a < aa; a++) {
if (links[a].getAttribute('href') == top) {
var o = links[a].parentNode;

@@ -793,7 +803,10 @@ function autoplay_blocked() {
document.querySelector('#treeul>li>a+a').textContent = '[root]';
despin('#tree');
reload_tree();
rescale_tree();
}

function rescale_tree() {
var q = '#tree';
var nq = 0;
while (true) {

@@ -802,18 +815,19 @@ function autoplay_blocked() {
if (!document.querySelector(q))
break;
}
ebi('treeul').style.width = (24 + nq) + 'em';
var w = treesz + (dyn ? nq : 0);
ebi('treeul').style.width = w + 'em';
}

function reload_tree() {
var cdir = get_vpath();
var links = document.querySelectorAll('#tree a+a');
var links = document.querySelectorAll('#treeul a+a');
for (var a = 0, aa = links.length; a < aa; a++) {
var href = links[a].getAttribute('href');
links[a].setAttribute('class', href == cdir ? 'hl' : '');
links[a].onclick = treego;
}
links = document.querySelectorAll('#tree li>a:first-child');
links = document.querySelectorAll('#treeul li>a:first-child');
for (var a = 0, aa = links.length; a < aa; a++) {
links[a].setAttribute('dst', links[a].nextSibling.getAttribute('href'));
links[a].onclick = treegrow;

@@ -844,6 +858,7 @@ function autoplay_blocked() {
rm.parentNode.removeChild(rm);
}
this.textContent = '+';
rescale_tree();
return;
}
var dst = this.getAttribute('dst');

@@ -898,7 +913,7 @@ function autoplay_blocked() {
html = html.join('\n');
ebi('files').innerHTML = html;

history.pushState(html, this.top, this.top);
hist_push(html, this.top);
apply_perms(res.perms);
despin('#files');

@@ -953,23 +968,45 @@ function autoplay_blocked() {
swrite('entreed', 'na');
}

function dyntree(e) {
ev(e);
dyn = !dyn;
bcfg_set('dyntree', dyn);
rescale_tree();
}

function scaletree(e) {
ev(e);
treesz += parseInt(this.getAttribute("step"));
if (isNaN(treesz))
treesz = 16;

swrite('treesz', treesz);
rescale_tree();
}

ebi('entree').onclick = entree;
ebi('detree').onclick = detree;
ebi('dyntree').onclick = dyntree;
ebi('twig').onclick = scaletree;
ebi('twobytwo').onclick = scaletree;
if (sread('entreed') == 'tree')
entree();

window.onpopstate = function (e) {
console.log(e.url + ' ,, ' + ((e.state + '').slice(0, 64)));
if (e.state) {
ebi('files').innerHTML = e.state;
reload_tree();
reload_browser();
}
var html = sessionStorage.getItem(e.state || 1);
if (!html)
return;

ebi('files').innerHTML = html;
reload_tree();
reload_browser();
};

if (window.history && history.pushState) {
var u = get_vpath() + window.location.hash;
history.replaceState(ebi('files').innerHTML, u, u);
hist_replace(ebi('files').innerHTML, u);
}
})();
@@ -209,41 +209,7 @@ function up2k_init(have_crypto) {
};
}

function cfg_get(name) {
var val = sread(name);
if (val === null)
return parseInt(ebi(name).value);

ebi(name).value = val;
return val;
}

function bcfg_get(name, defval) {
var o = ebi(name);
if (!o)
return defval;

var val = sread(name);
if (val === null)
val = defval;
else
val = (val == '1');

o.checked = val;
return val;
}

function bcfg_set(name, val) {
swrite(name, val ? '1' : '0');

var o = ebi(name);
if (o)
o.checked = val;

return val;
}

var parallel_uploads = cfg_get('nthread');
var parallel_uploads = icfg_get('nthread');
var multitask = bcfg_get('multitask', true);
var ask_up = bcfg_get('ask_up', true);
var flag_en = bcfg_get('flag_en', false);

@@ -292,3 +292,61 @@ function jwrite(key, val) {
else
swrite(key, JSON.stringify(val));
}

function icfg_get(name, defval) {
var o = ebi(name);

var val = parseInt(sread(name));
if (val === null)
return parseInt(o ? o.value : defval);

if (o)
o.value = val;

return val;
}

function bcfg_get(name, defval) {
var o = ebi(name);
if (!o)
return defval;

var val = sread(name);
if (val === null)
val = defval;
else
val = (val == '1');

bcfg_upd_ui(name, val);
return val;
}

function bcfg_set(name, val) {
swrite(name, val ? '1' : '0');
bcfg_upd_ui(name, val);
return val;
}

function bcfg_upd_ui(name, val) {
var o = ebi(name);
if (!o)
return;

if (o.getAttribute('type') == 'checkbox')
o.checked = val;
else if (o)
o.setAttribute('class', val ? 'on' : '');
}


function hist_push(html, url) {
var key = new Date().getTime();
sessionStorage.setItem(key, html);
history.pushState(key, url, url);
}

function hist_replace(html, url) {
var key = new Date().getTime();
sessionStorage.setItem(key, html);
history.replaceState(key, url, url);
}

@@ -122,7 +122,7 @@ git describe --tags >/dev/null 2>/dev/null && {
exit 1
}

dt="$(git log -1 --format=%cd --date=format:'%Y,%m,%d' | sed -E 's/,0?/, /g')"
dt="$(git log -1 --format=%cd --date=short | sed -E 's/-0?/, /g')"
printf 'git %3s: \033[36m%s\033[0m\n' ver "$ver" dt "$dt"
sed -ri '
s/^(VERSION =)(.*)/#\1\2\n\1 ('"$t_ver"')/;
@@ -16,6 +16,12 @@ from copyparty.authsrv import AuthSrv
from copyparty import util


class Cfg(Namespace):
def __init__(self, a=[], v=[], c=None):
ex = {k: False for k in "e2d e2ds e2dsa e2t e2ts e2tsr mte".split()}
super(Cfg, self).__init__(a=a, v=v, c=c, **ex)


class TestVFS(unittest.TestCase):
def dump(self, vfs):
print(json.dumps(vfs, indent=4, sort_keys=True, default=lambda o: o.__dict__))

@@ -35,7 +41,13 @@ class TestVFS(unittest.TestCase):
def ls(self, vfs, vpath, uname):
"""helper for resolving and listing a folder"""
vn, rem = vfs.get(vpath, uname, True, False)
return vn.ls(rem, uname)
r1 = vn.ls(rem, uname, False)
r2 = vn.ls(rem, uname, False)
self.assertEqual(r1, r2)

fsdir, real, virt = r1
real = [x[0] for x in real]
return fsdir, real, virt

def runcmd(self, *argv):
p = sp.Popen(argv, stdout=sp.PIPE, stderr=sp.PIPE)

@@ -102,7 +114,7 @@ class TestVFS(unittest.TestCase):
f.write(fn)

# defaults
vfs = AuthSrv(Namespace(c=None, a=[], v=[]), self.log).vfs
vfs = AuthSrv(Cfg(), self.log).vfs
self.assertEqual(vfs.nodes, {})
self.assertEqual(vfs.vpath, "")
self.assertEqual(vfs.realpath, td)

@@ -110,7 +122,7 @@ class TestVFS(unittest.TestCase):
self.assertEqual(vfs.uwrite, ["*"])

# single read-only rootfs (relative path)
vfs = AuthSrv(Namespace(c=None, a=[], v=["a/ab/::r"]), self.log).vfs
vfs = AuthSrv(Cfg(v=["a/ab/::r"]), self.log).vfs
self.assertEqual(vfs.nodes, {})
self.assertEqual(vfs.vpath, "")
self.assertEqual(vfs.realpath, os.path.join(td, "a", "ab"))

@@ -118,9 +130,7 @@ class TestVFS(unittest.TestCase):
self.assertEqual(vfs.uwrite, [])

# single read-only rootfs (absolute path)
vfs = AuthSrv(
Namespace(c=None, a=[], v=[td + "//a/ac/../aa//::r"]), self.log
).vfs
vfs = AuthSrv(Cfg(v=[td + "//a/ac/../aa//::r"]), self.log).vfs
self.assertEqual(vfs.nodes, {})
self.assertEqual(vfs.vpath, "")
self.assertEqual(vfs.realpath, os.path.join(td, "a", "aa"))

@@ -129,7 +139,7 @@ class TestVFS(unittest.TestCase):

# read-only rootfs with write-only subdirectory (read-write for k)
vfs = AuthSrv(
Namespace(c=None, a=["k:k"], v=[".::r:ak", "a/ac/acb:a/ac/acb:w:ak"]),
Cfg(a=["k:k"], v=[".::r:ak", "a/ac/acb:a/ac/acb:w:ak"]),
self.log,
).vfs
self.assertEqual(len(vfs.nodes), 1)

@@ -192,7 +202,10 @@ class TestVFS(unittest.TestCase):
self.assertEqual(list(virt), [])

# admin-only rootfs with all-read-only subfolder
vfs = AuthSrv(Namespace(c=None, a=["k:k"], v=[".::ak", "a:a:r"]), self.log,).vfs
vfs = AuthSrv(
Cfg(a=["k:k"], v=[".::ak", "a:a:r"]),
self.log,
).vfs
self.assertEqual(len(vfs.nodes), 1)
self.assertEqual(vfs.vpath, "")
self.assertEqual(vfs.realpath, td)

@@ -211,9 +224,7 @@ class TestVFS(unittest.TestCase):

# breadth-first construction
vfs = AuthSrv(
Namespace(
c=None,
a=[],
Cfg(
v=[
"a/ac/acb:a/ac/acb:w",
"a:a:w",

@@ -234,7 +245,7 @@ class TestVFS(unittest.TestCase):
self.undot(vfs, "./.././foo/..", "")

# shadowing
vfs = AuthSrv(Namespace(c=None, a=[], v=[".::r", "b:a/ac:r"]), self.log).vfs
vfs = AuthSrv(Cfg(v=[".::r", "b:a/ac:r"]), self.log).vfs

fsp, r1, v1 = self.ls(vfs, "", "*")
self.assertEqual(fsp, td)

@@ -271,7 +282,7 @@ class TestVFS(unittest.TestCase):
).encode("utf-8")
)

au = AuthSrv(Namespace(c=[cfg_path], a=[], v=[]), self.log)
au = AuthSrv(Cfg(c=[cfg_path]), self.log)
self.assertEqual(au.user["a"], "123")
self.assertEqual(au.user["asd"], "fgh:jkl")
n = au.vfs