Compare commits

...

28 Commits

Author SHA1 Message Date
ed
99d30edef3 v0.11.7 2021-06-05 03:33:29 +02:00
ed
b63ab15bf9 gallery links in new tab if a selection is active 2021-06-05 03:27:44 +02:00
ed
485cb4495c minify asmcrypto a bit 2021-06-05 03:25:54 +02:00
ed
df018eb1f2 add colors 2021-06-05 01:34:39 +02:00
ed
49aa47a9b8 way faster sha512 wasm fallback 2021-06-05 01:14:16 +02:00
ed
7d20eb202a optimize 2021-06-04 19:35:08 +02:00
ed
c533da9129 fix single-threaded mtag 2021-06-04 19:00:24 +02:00
ed
5cba31a814 spin on thumbnails too 2021-06-04 17:38:57 +02:00
ed
1d824cb26c add volume lister / containment checker 2021-06-04 02:23:46 +02:00
ed
83b903d60e readme: update todos 2021-06-02 09:42:33 +02:00
ed
9c8ccabe8e v0.11.6 2021-06-01 08:25:35 +02:00
ed
b1f2c4e70d gain 1000x performance with one weird trick 2021-06-01 06:17:46 +00:00
ed
273ca0c8da run tests on commit 2021-06-01 05:49:41 +02:00
ed
d6f516b34f pypi exclusive 2021-06-01 04:14:23 +02:00
ed
83127858ca v0.11.4 2021-06-01 03:55:51 +02:00
ed
d89329757e fix permission check in tar/zip generator (gdi) 2021-06-01 03:55:31 +02:00
ed
49ffec5320 v0.11.3 2021-06-01 03:11:02 +02:00
ed
2eaae2b66a fix youtube query example 2021-06-01 02:53:54 +02:00
ed
ea4441e25c v0.11.2 2021-06-01 02:47:37 +02:00
ed
e5f34042f9 more precise volume state in admin panel 2021-06-01 02:32:53 +02:00
ed
271096874a fix adv and date handling in query lang 2021-06-01 02:10:17 +02:00
ed
8efd780a72 thumbnail cleaner too noisy 2021-06-01 01:51:03 +02:00
ed
41bcf7308d fix search results as thumbnails 2021-06-01 01:41:36 +02:00
ed
d102bb3199 fix on-upload hasher (0.11.1 regression) 2021-06-01 01:20:34 +02:00
ed
d0bed95415 search: add a query language 2021-06-01 01:16:40 +02:00
ed
2528729971 add dbtool 2021-05-30 16:49:08 +00:00
ed
292c18b3d0 v0.11.1 2021-05-29 23:39:39 +02:00
ed
0be7c5e2d8 live db/tags rescan 2021-05-29 23:35:07 +02:00
25 changed files with 1071 additions and 376 deletions

View File

@@ -294,6 +294,8 @@ the same arguments can be set as volume flags, in addition to `d2d` and `d2t` fo
`e2tsr` is probably always overkill, since `e2ds`/`e2dsa` would pick up any file modifications and cause `e2ts` to reindex those `e2tsr` is probably always overkill, since `e2ds`/`e2dsa` would pick up any file modifications and cause `e2ts` to reindex those
the rescan button in the admin panel has no effect unless the volume has `-e2ds` or higher
## metadata from audio files ## metadata from audio files
@@ -518,20 +520,25 @@ in the `scripts` folder:
roughly sorted by priority roughly sorted by priority
* separate sqlite table per tag
* audio fingerprinting
* readme.md as epilogue * readme.md as epilogue
* single sha512 across all up2k chunks? maybe
* reduce up2k roundtrips * reduce up2k roundtrips
* start from a chunk index and just go * start from a chunk index and just go
* terminate client on bad data * terminate client on bad data
* `os.copy_file_range` for up2k cloning
* single sha512 across all up2k chunks? maybe
* figure out the deal with pixel3a not being connectable as hotspot
* pixel3a having unpredictable 3sec latency in general :||||
discarded ideas discarded ideas
* separate sqlite table per tag
* performance fixed by skipping some indexes (`+mt.k`)
* audio fingerprinting
* only makes sense if there can be a wasm client and that doesn't exist yet (except for olaf which is agpl hence counts as not existing)
* `os.copy_file_range` for up2k cloning
* almost never hit this path anyways
* up2k partials ui * up2k partials ui
* feels like there isn't much point
* cache sha512 chunks on client * cache sha512 chunks on client
* too dangerous
* comment field * comment field
* nah
* look into android thumbnail cache file format * look into android thumbnail cache file format
* absolutely not

View File

@@ -45,3 +45,18 @@ you could replace winfsp with [dokan](https://github.com/dokan-dev/dokany/releas
# [`mtag/`](mtag/) # [`mtag/`](mtag/)
* standalone programs which perform misc. file analysis * standalone programs which perform misc. file analysis
* copyparty can Popen programs like these during file indexing to collect additional metadata * copyparty can Popen programs like these during file indexing to collect additional metadata
# [`dbtool.py`](dbtool.py)
upgrade utility which can show db info and help transfer data between databases, for example when a new version of copyparty recommends to wipe the DB and reindex because it now collects additional metadata during analysis, but you have some really expensive `-mtp` parsers and want to copy over the tags from the old db
for that example (upgrading to v0.11.0), first move the old db aside, launch copyparty, let it rebuild the db until the point where it starts running mtp (colored messages as it adds the mtp tags), then CTRL-C and patch in the old mtp tags from the old db instead
so assuming you have `-mtp` parsers to provide the tags `key` and `.bpm`:
```
~/bin/dbtool.py -ls up2k.db
~/bin/dbtool.py -src up2k.db.v0.10.22 up2k.db -cmp
~/bin/dbtool.py -src up2k.db.v0.10.22 up2k.db -rm-mtp-flag -copy key
~/bin/dbtool.py -src up2k.db.v0.10.22 up2k.db -rm-mtp-flag -copy .bpm -vac
```

198
bin/dbtool.py Executable file
View File

@@ -0,0 +1,198 @@
#!/usr/bin/env python3
import os
import sys
import sqlite3
import argparse
DB_VER = 3
def die(msg):
    """Print *msg* in red to stdout and terminate with exit status 1."""
    print("\033[31m\n{}\n\033[0m".format(msg))
    sys.exit(1)
def read_ver(db):
    """Return the schema version stored in *db*.

    Looks for the 'sver' key in the `ki` table first, then `kv`
    (older/newer schema generations use different table names).
    Returns the version as an int, or the string "corrupt" if
    neither table yields a value.
    """
    for tab in ["ki", "kv"]:
        try:
            c = db.execute("select v from {} where k = 'sver'".format(tab))
        except sqlite3.Error:
            # table does not exist in this schema generation; try the next.
            # (was a bare `except:` which would also hide e.g. KeyboardInterrupt)
            continue

        rows = c.fetchall()
        if rows:
            return int(rows[0][0])

    return "corrupt"
def ls(db):
    """Print a summary of *db*: total file and tag counts, then a
    per-key histogram of the `mt` (metadata tags) table.

    Read-only; output goes to stdout.
    """
    nfiles = next(db.execute("select count(w) from up"))[0]
    ntags = next(db.execute("select count(w) from mt"))[0]
    print(f"{nfiles} files")
    print(f"{ntags} tags\n")

    # fixed typo: "occurences" -> "occurrences"
    print("number of occurrences for each tag,")
    print(" 'x' = file has no tags")
    print(" 't:mtp' = the mtp flag (file not mtp processed yet)")
    print()

    for k, nk in db.execute("select k, count(k) from mt group by k order by k"):
        print(f"{nk:9} {k}")
def compare(n1, d1, n2, d2, verbose):
    """Print a diff-style report of db *d1* (labeled *n1*) against *d2*
    (labeled *n2*).

    Pass 1: files present in d1 but missing from d2, matched on the
    16-char wark prefix. Pass 2: tags present in d1 but missing (or
    different) in d2. Read-only on both databases; prints to stdout.
    *verbose* additionally prints each missing file/tag.
    """
    # pass 1: files
    nt = next(d1.execute("select count(w) from up"))[0]
    n = 0
    miss = 0
    for w, rd, fn in d1.execute("select w, rd, fn from up"):
        n += 1
        if n % 25_000 == 0:
            m = f"\033[36mchecked {n:,} of {nt:,} files in {n1} against {n2}\033[0m"
            print(m)

        q = "select w from up where substr(w,1,16) = ?"
        hit = d2.execute(q, (w[:16],)).fetchone()
        if not hit:
            miss += 1
            if verbose:
                print(f"file in {n1} missing in {n2}: [{w}] {rd}/{fn}")

    print(f" {miss} files in {n1} missing in {n2}\n")

    # pass 2: tags
    nt = next(d1.execute("select count(w) from mt"))[0]
    n = 0
    miss = {}  # tag-key -> count of missing/different values
    nmiss = 0
    for w, k, v in d1.execute("select * from mt"):
        n += 1
        if n % 100_000 == 0:
            m = f"\033[36mchecked {n:,} of {nt:,} tags in {n1} against {n2}, so far {nmiss} missing tags\033[0m"
            print(m)

        v2 = d2.execute("select v from mt where w = ? and +k = ?", (w, k)).fetchone()
        if v2:
            v2 = v2[0]

        if v2 is not None:
            if k.startswith("."):
                # numeric tags (.bpm etc): tolerate some drift between
                # parser versions before counting the tag as missing.
                # NOTE(review): the threshold is v/0.9, which is *larger*
                # than v itself -- possibly meant to be a fraction of v;
                # kept as-is, confirm intent
                try:
                    diff = abs(float(v) - float(v2))
                    if diff > float(v) / 0.9:
                        v2 = None
                    else:
                        v2 = v
                except (TypeError, ValueError):
                    # non-numeric value; fall through to exact comparison
                    # (was a bare `except:`)
                    pass

            if v != v2:
                v2 = None

        if v2 is None:
            nmiss += 1
            # was try/except KeyError; dict.get is the idiomatic counter
            miss[k] = miss.get(k, 0) + 1
            if verbose:
                # mt.w appears to already be the 16-char wark prefix
                # (it is compared against substr(up.w,1,16)) -- confirm
                q = "select rd, fn from up where substr(w,1,16) = ?"
                rd, fn = d1.execute(q, (w,)).fetchone()
                print(f"missing in {n2}: [{w}] [{rd}/{fn}] {k} = {v}")

    for k, v in sorted(miss.items()):
        if v:
            print(f"{n1} has {v:6} more {k:<6} tags than {n2}")

    print(f"in total, {nmiss} missing tags in {n2}\n")
def copy_mtp(d1, d2, tag, rm):
    """Copy every *tag* row from db *d1* into *d2*, replacing rows whose
    value differs; if *rm* is set, also delete the 't:mtp' marker so
    copyparty will not re-run mtp parsers on that file.

    Commits *d2* once at the end and prints progress/summary to stdout.
    """
    total = next(d1.execute("select count(w) from mt where k = ?", (tag,)))[0]
    seen = 0
    copied = 0
    for w, k, v in d1.execute("select * from mt where k = ?", (tag,)):
        seen += 1
        if seen % 25_000 == 0:
            print(f"\033[36m{seen:,} of {total:,} tags checked, so far {copied} copied\033[0m")

        row = d2.execute("select v from mt where w = ? and +k = ?", (w, k)).fetchone()
        existing = row[0] if row else None

        # identical value already present; nothing to do for this row
        if existing == v:
            continue

        copied += 1
        if existing is not None:
            d2.execute("delete from mt where w = ? and +k = ?", (w, k))

        d2.execute("insert into mt values (?,?,?)", (w, k, v))
        if rm:
            d2.execute("delete from mt where w = ? and +k = 't:mtp'", (w,))

    d2.commit()
    print(f"copied {copied} {tag} tags over")
def main():
    """CLI entry point: parse arguments, sanity-check the db(s),
    then dispatch to ls / compare / copy_mtp."""
    os.system("")  # presumably enables ansi colors on windows consoles -- confirm
    print()

    ap = argparse.ArgumentParser()
    ap.add_argument("db", help="database to work on")
    ap.add_argument("-src", metavar="DB", type=str, help="database to copy from")

    ap2 = ap.add_argument_group("informational / read-only stuff")
    ap2.add_argument("-v", action="store_true", help="verbose")
    ap2.add_argument("-ls", action="store_true", help="list summary for db")
    ap2.add_argument("-cmp", action="store_true", help="compare databases")

    ap2 = ap.add_argument_group("options which modify target db")
    ap2.add_argument("-copy", metavar="TAG", type=str, help="mtp tag to copy over")
    ap2.add_argument(
        "-rm-mtp-flag",
        action="store_true",
        help="when an mtp tag is copied over, also mark that as done, so copyparty won't run mtp on it",
    )
    ap2.add_argument("-vac", action="store_true", help="optimize DB")
    # NOTE(review): -vac is parsed but never acted on in this function -- confirm

    ar = ap.parse_args()

    for v in [ar.db, ar.src]:
        if v and not os.path.exists(v):
            die("database must exist")

    db = sqlite3.connect(ar.db)
    ds = sqlite3.connect(ar.src) if ar.src else None

    for d, n in [[ds, "src"], [db, "dst"]]:
        if not d:
            continue

        ver = read_ver(d)
        if ver == "corrupt":
            # BUGFIX: the db label was never substituted into the message
            die("{} database appears to be corrupt, sorry".format(n))

        if ver != DB_VER:
            m = f"{n} db is version {ver}, this tool only supports version {DB_VER}, please upgrade it with copyparty first"
            die(m)

    if ar.ls:
        ls(db)

    if ar.cmp:
        if not ds:
            die("need src db to compare against")

        compare("src", ds, "dst", db, ar.v)
        compare("dst", db, "src", ds, ar.v)

    if ar.copy:
        if not ds:
            # BUGFIX(robustness): previously crashed with AttributeError
            # when -copy was given without -src
            die("need src db to copy from")

        copy_mtp(ds, db, ar.copy, ar.rm_mtp_flag)


if __name__ == "__main__":
    main()

View File

@@ -225,6 +225,19 @@ def run_argparse(argv, formatter):
--ciphers help = available ssl/tls ciphers, --ciphers help = available ssl/tls ciphers,
--ssl-ver help = available ssl/tls versions, --ssl-ver help = available ssl/tls versions,
default is what python considers safe, usually >= TLS1 default is what python considers safe, usually >= TLS1
values for --ls:
"USR" is a user to browse as; * is anonymous, ** is all users
"VOL" is a single volume to scan, default is * (all vols)
"FLAG" is flags;
"v" in addition to realpaths, print usernames and vpaths
"ln" only prints symlinks leaving the volume mountpoint
"p" exits 1 if any such symlinks are found
"r" resumes startup after the listing
examples:
--ls '**' # list all files which are possible to read
--ls '**,*,ln' # check for dangerous symlinks
--ls '**,*,ln,p,r' # check, then start normally if safe
""" """
), ),
) )
@@ -249,6 +262,10 @@ def run_argparse(argv, formatter):
ap.add_argument("--urlform", metavar="MODE", type=str, default="print,get", help="how to handle url-forms") ap.add_argument("--urlform", metavar="MODE", type=str, default="print,get", help="how to handle url-forms")
ap.add_argument("--salt", type=str, default="hunter2", help="up2k file-hash salt") ap.add_argument("--salt", type=str, default="hunter2", help="up2k file-hash salt")
ap2 = ap.add_argument_group('admin panel options')
ap2.add_argument("--no-rescan", action="store_true", help="disable ?scan (volume reindexing)")
ap2.add_argument("--no-stack", action="store_true", help="disable ?stack (list all stacks)")
ap2 = ap.add_argument_group('thumbnail options') ap2 = ap.add_argument_group('thumbnail options')
ap2.add_argument("--no-thumb", action="store_true", help="disable all thumbnails") ap2.add_argument("--no-thumb", action="store_true", help="disable all thumbnails")
ap2.add_argument("--no-vthumb", action="store_true", help="disable video thumbnails") ap2.add_argument("--no-vthumb", action="store_true", help="disable video thumbnails")
@@ -257,7 +274,7 @@ def run_argparse(argv, formatter):
ap2.add_argument("--th-no-jpg", action="store_true", help="disable jpg output") ap2.add_argument("--th-no-jpg", action="store_true", help="disable jpg output")
ap2.add_argument("--th-no-webp", action="store_true", help="disable webp output") ap2.add_argument("--th-no-webp", action="store_true", help="disable webp output")
ap2.add_argument("--th-poke", metavar="SEC", type=int, default=300, help="activity labeling cooldown") ap2.add_argument("--th-poke", metavar="SEC", type=int, default=300, help="activity labeling cooldown")
ap2.add_argument("--th-clean", metavar="SEC", type=int, default=1800, help="cleanup interval") ap2.add_argument("--th-clean", metavar="SEC", type=int, default=43200, help="cleanup interval")
ap2.add_argument("--th-maxage", metavar="SEC", type=int, default=604800, help="max folder age") ap2.add_argument("--th-maxage", metavar="SEC", type=int, default=604800, help="max folder age")
ap2 = ap.add_argument_group('database options') ap2 = ap.add_argument_group('database options')
@@ -284,9 +301,11 @@ def run_argparse(argv, formatter):
ap2.add_argument("--ssl-log", metavar="PATH", help="log master secrets") ap2.add_argument("--ssl-log", metavar="PATH", help="log master secrets")
ap2 = ap.add_argument_group('debug options') ap2 = ap.add_argument_group('debug options')
ap2.add_argument("--ls", metavar="U[,V[,F]]", help="scan all volumes")
ap2.add_argument("--log-conn", action="store_true", help="print tcp-server msgs") ap2.add_argument("--log-conn", action="store_true", help="print tcp-server msgs")
ap2.add_argument("--no-sendfile", action="store_true", help="disable sendfile") ap2.add_argument("--no-sendfile", action="store_true", help="disable sendfile")
ap2.add_argument("--no-scandir", action="store_true", help="disable scandir") ap2.add_argument("--no-scandir", action="store_true", help="disable scandir")
ap2.add_argument("--no-fastboot", action="store_true", help="wait for up2k indexing")
ap2.add_argument("--ihead", metavar="HEADER", action='append', help="dump incoming header") ap2.add_argument("--ihead", metavar="HEADER", action='append', help="dump incoming header")
ap2.add_argument("--lf-url", metavar="RE", type=str, default=r"^/\.cpr/|\?th=[wj]$", help="dont log URLs matching") ap2.add_argument("--lf-url", metavar="RE", type=str, default=r"^/\.cpr/|\?th=[wj]$", help="dont log URLs matching")

View File

@@ -1,8 +1,8 @@
# coding: utf-8 # coding: utf-8
VERSION = (0, 11, 0) VERSION = (0, 11, 7)
CODENAME = "the grid" CODENAME = "the grid"
BUILD_DT = (2021, 5, 29) BUILD_DT = (2021, 6, 5)
S_VERSION = ".".join(map(str, VERSION)) S_VERSION = ".".join(map(str, VERSION))
S_BUILD_DT = "{0:04d}-{1:02d}-{2:02d}".format(*BUILD_DT) S_BUILD_DT = "{0:04d}-{1:02d}-{2:02d}".format(*BUILD_DT)

View File

@@ -14,11 +14,12 @@ from .util import IMPLICATIONS, undot, Pebkac, fsdec, fsenc, statdir, nuprint
class VFS(object): class VFS(object):
"""single level in the virtual fs""" """single level in the virtual fs"""
def __init__(self, realpath, vpath, uread=[], uwrite=[], flags={}): def __init__(self, realpath, vpath, uread=[], uwrite=[], uadm=[], flags={}):
self.realpath = realpath # absolute path on host filesystem self.realpath = realpath # absolute path on host filesystem
self.vpath = vpath # absolute path in the virtual filesystem self.vpath = vpath # absolute path in the virtual filesystem
self.uread = uread # users who can read this self.uread = uread # users who can read this
self.uwrite = uwrite # users who can write this self.uwrite = uwrite # users who can write this
self.uadm = uadm # users who are regular admins
self.flags = flags # config switches self.flags = flags # config switches
self.nodes = {} # child nodes self.nodes = {} # child nodes
self.all_vols = {vpath: self} # flattened recursive self.all_vols = {vpath: self} # flattened recursive
@@ -27,7 +28,7 @@ class VFS(object):
return "VFS({})".format( return "VFS({})".format(
", ".join( ", ".join(
"{}={!r}".format(k, self.__dict__[k]) "{}={!r}".format(k, self.__dict__[k])
for k in "realpath vpath uread uwrite flags".split() for k in "realpath vpath uread uwrite uadm flags".split()
) )
) )
@@ -52,6 +53,7 @@ class VFS(object):
"{}/{}".format(self.vpath, name).lstrip("/"), "{}/{}".format(self.vpath, name).lstrip("/"),
self.uread, self.uread,
self.uwrite, self.uwrite,
self.uadm,
self.flags, self.flags,
) )
self._trk(vn) self._trk(vn)
@@ -133,7 +135,7 @@ class VFS(object):
# #
return os.path.realpath(rp) return os.path.realpath(rp)
def ls(self, rem, uname, scandir, lstat=False): def ls(self, rem, uname, scandir, incl_wo=False, lstat=False):
"""return user-readable [fsdir,real,virt] items at vpath""" """return user-readable [fsdir,real,virt] items at vpath"""
virt_vis = {} # nodes readable by user virt_vis = {} # nodes readable by user
abspath = self.canonical(rem) abspath = self.canonical(rem)
@@ -141,12 +143,12 @@ class VFS(object):
real.sort() real.sort()
if not rem: if not rem:
for name, vn2 in sorted(self.nodes.items()): for name, vn2 in sorted(self.nodes.items()):
if ( ok = uname in vn2.uread or "*" in vn2.uread
uname in vn2.uread
or "*" in vn2.uread if not ok and incl_wo:
or uname in vn2.uwrite ok = uname in vn2.uwrite or "*" in vn2.uwrite
or "*" in vn2.uwrite
): if ok:
virt_vis[name] = vn2 virt_vis[name] = vn2
# no vfs nodes in the list of real inodes # no vfs nodes in the list of real inodes
@@ -160,7 +162,7 @@ class VFS(object):
rel is a unix-style user-defined vpath (not vfs-related) rel is a unix-style user-defined vpath (not vfs-related)
""" """
fsroot, vfs_ls, vfs_virt = self.ls(rem, uname, scandir, lstat) fsroot, vfs_ls, vfs_virt = self.ls(rem, uname, scandir, False, lstat)
rfiles = [x for x in vfs_ls if not stat.S_ISDIR(x[1].st_mode)] rfiles = [x for x in vfs_ls if not stat.S_ISDIR(x[1].st_mode)]
rdirs = [x for x in vfs_ls if stat.S_ISDIR(x[1].st_mode)] rdirs = [x for x in vfs_ls if stat.S_ISDIR(x[1].st_mode)]
@@ -226,17 +228,19 @@ class VFS(object):
for f in [{"vp": v, "ap": a, "st": n[1]} for v, a, n in files]: for f in [{"vp": v, "ap": a, "st": n[1]} for v, a, n in files]:
yield f yield f
def user_tree(self, uname, readable=False, writable=False): def user_tree(self, uname, readable, writable, admin):
ret = [] is_readable = False
opt1 = readable and (uname in self.uread or "*" in self.uread) if uname in self.uread or "*" in self.uread:
opt2 = writable and (uname in self.uwrite or "*" in self.uwrite) readable.append(self.vpath)
if opt1 or opt2: is_readable = True
ret.append(self.vpath)
if uname in self.uwrite or "*" in self.uwrite:
writable.append(self.vpath)
if is_readable:
admin.append(self.vpath)
for _, vn in sorted(self.nodes.items()): for _, vn in sorted(self.nodes.items()):
ret.extend(vn.user_tree(uname, readable, writable)) vn.user_tree(uname, readable, writable, admin)
return ret
class AuthSrv(object): class AuthSrv(object):
@@ -269,7 +273,7 @@ class AuthSrv(object):
yield prev, True yield prev, True
def _parse_config_file(self, fd, user, mread, mwrite, mflags, mount): def _parse_config_file(self, fd, user, mread, mwrite, madm, mflags, mount):
vol_src = None vol_src = None
vol_dst = None vol_dst = None
self.line_ctr = 0 self.line_ctr = 0
@@ -301,6 +305,7 @@ class AuthSrv(object):
mount[vol_dst] = vol_src mount[vol_dst] = vol_src
mread[vol_dst] = [] mread[vol_dst] = []
mwrite[vol_dst] = [] mwrite[vol_dst] = []
madm[vol_dst] = []
mflags[vol_dst] = {} mflags[vol_dst] = {}
continue continue
@@ -311,10 +316,15 @@ class AuthSrv(object):
uname = "*" uname = "*"
self._read_vol_str( self._read_vol_str(
lvl, uname, mread[vol_dst], mwrite[vol_dst], mflags[vol_dst] lvl,
uname,
mread[vol_dst],
mwrite[vol_dst],
madm[vol_dst],
mflags[vol_dst],
) )
def _read_vol_str(self, lvl, uname, mr, mw, mf): def _read_vol_str(self, lvl, uname, mr, mw, ma, mf):
if lvl == "c": if lvl == "c":
cval = True cval = True
if "=" in uname: if "=" in uname:
@@ -332,6 +342,9 @@ class AuthSrv(object):
if lvl in "wa": if lvl in "wa":
mw.append(uname) mw.append(uname)
if lvl == "a":
ma.append(uname)
def _read_volflag(self, flags, name, value, is_list): def _read_volflag(self, flags, name, value, is_list):
if name not in ["mtp"]: if name not in ["mtp"]:
flags[name] = value flags[name] = value
@@ -355,6 +368,7 @@ class AuthSrv(object):
user = {} # username:password user = {} # username:password
mread = {} # mountpoint:[username] mread = {} # mountpoint:[username]
mwrite = {} # mountpoint:[username] mwrite = {} # mountpoint:[username]
madm = {} # mountpoint:[username]
mflags = {} # mountpoint:[flag] mflags = {} # mountpoint:[flag]
mount = {} # dst:src (mountpoint:realpath) mount = {} # dst:src (mountpoint:realpath)
@@ -378,17 +392,22 @@ class AuthSrv(object):
mount[dst] = src mount[dst] = src
mread[dst] = [] mread[dst] = []
mwrite[dst] = [] mwrite[dst] = []
madm[dst] = []
mflags[dst] = {} mflags[dst] = {}
perms = perms.split(":") perms = perms.split(":")
for (lvl, uname) in [[x[0], x[1:]] for x in perms]: for (lvl, uname) in [[x[0], x[1:]] for x in perms]:
self._read_vol_str(lvl, uname, mread[dst], mwrite[dst], mflags[dst]) self._read_vol_str(
lvl, uname, mread[dst], mwrite[dst], madm[dst], mflags[dst]
)
if self.args.c: if self.args.c:
for cfg_fn in self.args.c: for cfg_fn in self.args.c:
with open(cfg_fn, "rb") as f: with open(cfg_fn, "rb") as f:
try: try:
self._parse_config_file(f, user, mread, mwrite, mflags, mount) self._parse_config_file(
f, user, mread, mwrite, madm, mflags, mount
)
except: except:
m = "\n\033[1;31m\nerror in config file {} on line {}:\n\033[0m" m = "\n\033[1;31m\nerror in config file {} on line {}:\n\033[0m"
print(m.format(cfg_fn, self.line_ctr)) print(m.format(cfg_fn, self.line_ctr))
@@ -410,12 +429,15 @@ class AuthSrv(object):
if dst == "": if dst == "":
# rootfs was mapped; fully replaces the default CWD vfs # rootfs was mapped; fully replaces the default CWD vfs
vfs = VFS(mount[dst], dst, mread[dst], mwrite[dst], mflags[dst]) vfs = VFS(
mount[dst], dst, mread[dst], mwrite[dst], madm[dst], mflags[dst]
)
continue continue
v = vfs.add(mount[dst], dst) v = vfs.add(mount[dst], dst)
v.uread = mread[dst] v.uread = mread[dst]
v.uwrite = mwrite[dst] v.uwrite = mwrite[dst]
v.uadm = madm[dst]
v.flags = mflags[dst] v.flags = mflags[dst]
missing_users = {} missing_users = {}
@@ -535,3 +557,90 @@ class AuthSrv(object):
# import pprint # import pprint
# pprint.pprint({"usr": user, "rd": mread, "wr": mwrite, "mnt": mount}) # pprint.pprint({"usr": user, "rd": mread, "wr": mwrite, "mnt": mount})
def dbg_ls(self):
users = self.args.ls
vols = "*"
flags = []
try:
users, vols = users.split(",", 1)
except:
pass
try:
vols, flags = vols.split(",", 1)
flags = flags.split(",")
except:
pass
if users == "**":
users = list(self.user.keys()) + ["*"]
else:
users = [users]
for u in users:
if u not in self.user and u != "*":
raise Exception("user not found: " + u)
if vols == "*":
vols = ["/" + x for x in self.vfs.all_vols.keys()]
else:
vols = [vols]
for v in vols:
if not v.startswith("/"):
raise Exception("volumes must start with /")
if v[1:] not in self.vfs.all_vols:
raise Exception("volume not found: " + v)
self.log({"users": users, "vols": vols, "flags": flags})
for k, v in self.vfs.all_vols.items():
self.log("/{}: read({}) write({})".format(k, v.uread, v.uwrite))
flag_v = "v" in flags
flag_ln = "ln" in flags
flag_p = "p" in flags
flag_r = "r" in flags
n_bads = 0
for v in vols:
v = v[1:]
vtop = "/{}/".format(v) if v else "/"
for u in users:
self.log("checking /{} as {}".format(v, u))
try:
vn, _ = self.vfs.get(v, u, True, False)
except:
continue
atop = vn.realpath
g = vn.walk("", "", u, True, not self.args.no_scandir, lstat=False)
for vpath, apath, files, _, _ in g:
fnames = [n[0] for n in files]
vpaths = [vpath + "/" + n for n in fnames] if vpath else fnames
vpaths = [vtop + x for x in vpaths]
apaths = [os.path.join(apath, n) for n in fnames]
files = list(zip(vpaths, apaths))
if flag_ln:
files = [x for x in files if not x[1].startswith(atop + os.sep)]
n_bads += len(files)
if flag_v:
msg = [
'# user "{}", vpath "{}"\n{}'.format(u, vp, ap)
for vp, ap in files
]
else:
msg = [x[1] for x in files]
if msg:
nuprint("\n".join(msg))
if n_bads and flag_p:
raise Exception("found symlink leaving volume, and strict is set")
if not flag_r:
sys.exit(0)

View File

@@ -10,6 +10,7 @@ import json
import string import string
import socket import socket
import ctypes import ctypes
import traceback
from datetime import datetime from datetime import datetime
import calendar import calendar
@@ -152,9 +153,8 @@ class HttpCli(object):
pwd = uparam.get("pw") pwd = uparam.get("pw")
self.uname = self.auth.iuser.get(pwd, "*") self.uname = self.auth.iuser.get(pwd, "*")
if self.uname: self.rvol, self.wvol, self.avol = [[], [], []]
self.rvol = self.auth.vfs.user_tree(self.uname, readable=True) self.auth.vfs.user_tree(self.uname, self.rvol, self.wvol, self.avol)
self.wvol = self.auth.vfs.user_tree(self.uname, writable=True)
ua = self.headers.get("user-agent", "") ua = self.headers.get("user-agent", "")
self.is_rclone = ua.startswith("rclone/") self.is_rclone = ua.startswith("rclone/")
@@ -326,6 +326,12 @@ class HttpCli(object):
self.vpath = None self.vpath = None
return self.tx_mounts() return self.tx_mounts()
if "scan" in self.uparam:
return self.scanvol()
if "stack" in self.uparam:
return self.tx_stack()
return self.tx_browser() return self.tx_browser()
def handle_options(self): def handle_options(self):
@@ -592,8 +598,9 @@ class HttpCli(object):
taglist = {} taglist = {}
else: else:
# search by query params # search by query params
self.log("qj: " + repr(body)) q = body["q"]
hits, taglist = idx.search(vols, body) self.log("qj: " + q)
hits, taglist = idx.search(vols, q)
msg = len(hits) msg = len(hits)
idx.p_end = time.time() idx.p_end = time.time()
@@ -1302,12 +1309,66 @@ class HttpCli(object):
def tx_mounts(self): def tx_mounts(self):
suf = self.urlq(rm=["h"]) suf = self.urlq(rm=["h"])
rvol = [x + "/" if x else x for x in self.rvol] rvol, wvol, avol = [
wvol = [x + "/" if x else x for x in self.wvol] [("/" + x).rstrip("/") + "/" for x in y]
html = self.j2("splash", this=self, rvol=rvol, wvol=wvol, url_suf=suf) for y in [self.rvol, self.wvol, self.avol]
]
vstate = {}
if self.avol and not self.args.no_rescan:
x = self.conn.hsrv.broker.put(True, "up2k.get_volstate")
vstate = json.loads(x.get())
vstate = {("/" + k).rstrip("/") + "/": v for k, v in vstate.items()}
html = self.j2(
"splash",
this=self,
rvol=rvol,
wvol=wvol,
avol=avol,
vstate=vstate,
url_suf=suf,
)
self.reply(html.encode("utf-8"), headers=NO_STORE) self.reply(html.encode("utf-8"), headers=NO_STORE)
return True return True
def scanvol(self):
if not self.readable or not self.writable:
raise Pebkac(403, "not admin")
if self.args.no_rescan:
raise Pebkac(403, "disabled by argv")
vn, _ = self.auth.vfs.get(self.vpath, self.uname, True, True)
args = [self.auth.vfs.all_vols, [vn.vpath]]
x = self.conn.hsrv.broker.put(True, "up2k.rescan", *args)
x = x.get()
if not x:
self.redirect("", "?h")
return ""
raise Pebkac(500, x)
def tx_stack(self):
if not self.readable or not self.writable:
raise Pebkac(403, "not admin")
if self.args.no_stack:
raise Pebkac(403, "disabled by argv")
ret = []
names = dict([(t.ident, t.name) for t in threading.enumerate()])
for tid, stack in sys._current_frames().items():
ret.append("\n\n# {} ({:x})".format(names.get(tid), tid))
for fn, lno, name, line in traceback.extract_stack(stack):
ret.append('File: "{}", line {}, in {}'.format(fn, lno, name))
if line:
ret.append(" " + str(line.strip()))
ret = ("<pre>" + "\n".join(ret)).encode("utf-8")
self.reply(ret)
def tx_tree(self): def tx_tree(self):
top = self.uparam["tree"] or "" top = self.uparam["tree"] or ""
dst = self.vpath dst = self.vpath
@@ -1337,7 +1398,9 @@ class HttpCli(object):
try: try:
vn, rem = self.auth.vfs.get(top, self.uname, True, False) vn, rem = self.auth.vfs.get(top, self.uname, True, False)
fsroot, vfs_ls, vfs_virt = vn.ls(rem, self.uname, not self.args.no_scandir) fsroot, vfs_ls, vfs_virt = vn.ls(
rem, self.uname, not self.args.no_scandir, True
)
except: except:
vfs_ls = [] vfs_ls = []
vfs_virt = {} vfs_virt = {}
@@ -1502,7 +1565,9 @@ class HttpCli(object):
if v is not None: if v is not None:
return self.tx_zip(k, v, vn, rem, [], self.args.ed) return self.tx_zip(k, v, vn, rem, [], self.args.ed)
fsroot, vfs_ls, vfs_virt = vn.ls(rem, self.uname, not self.args.no_scandir) fsroot, vfs_ls, vfs_virt = vn.ls(
rem, self.uname, not self.args.no_scandir, True
)
stats = {k: v for k, v in vfs_ls} stats = {k: v for k, v in vfs_ls}
vfs_ls = [x[0] for x in vfs_ls] vfs_ls = [x[0] for x in vfs_ls]
vfs_ls.extend(vfs_virt.keys()) vfs_ls.extend(vfs_virt.keys())

View File

@@ -39,6 +39,8 @@ class SvcHub(object):
# jank goes here # jank goes here
auth = AuthSrv(self.args, self.log, False) auth = AuthSrv(self.args, self.log, False)
if args.ls:
auth.dbg_ls()
# initiate all services to manage # initiate all services to manage
self.tcpsrv = TcpSrv(self) self.tcpsrv = TcpSrv(self)

View File

@@ -316,10 +316,10 @@ class ThumbSrv(object):
time.sleep(interval) time.sleep(interval)
for vol in self.vols: for vol in self.vols:
vol += "/.hist/th" vol += "/.hist/th"
self.log("cln {}/".format(vol)) self.log("\033[Jcln {}/\033[A".format(vol))
self.clean(vol) self.clean(vol)
self.log("cln ok") self.log("\033[Jcln ok")
def clean(self, vol): def clean(self, vol):
# self.log("cln {}".format(vol)) # self.log("cln {}".format(vol))

View File

@@ -47,11 +47,11 @@ class U2idx(object):
fhash = body["hash"] fhash = body["hash"]
wark = up2k_wark_from_hashlist(self.args.salt, fsize, fhash) wark = up2k_wark_from_hashlist(self.args.salt, fsize, fhash)
uq = "substr(w,1,16) = ? and w = ?" uq = "where substr(w,1,16) = ? and w = ?"
uv = [wark[:16], wark] uv = [wark[:16], wark]
try: try:
return self.run_query(vols, uq, uv, {})[0] return self.run_query(vols, uq, uv)[0]
except Exception as ex: except Exception as ex:
raise Pebkac(500, repr(ex)) raise Pebkac(500, repr(ex))
@@ -67,37 +67,121 @@ class U2idx(object):
self.cur[ptop] = cur self.cur[ptop] = cur
return cur return cur
def search(self, vols, body): def search(self, vols, uq):
"""search by query params""" """search by query params"""
if not HAVE_SQLITE3: if not HAVE_SQLITE3:
return [] return []
qobj = {} q = ""
_conv_sz(qobj, body, "sz_min", "up.sz >= ?") va = []
_conv_sz(qobj, body, "sz_max", "up.sz <= ?") joins = ""
_conv_dt(qobj, body, "dt_min", "up.mt >= ?") is_key = True
_conv_dt(qobj, body, "dt_max", "up.mt <= ?") is_size = False
for seg, dk in [["path", "up.rd"], ["name", "up.fn"]]: is_date = False
if seg in body: kw_key = ["(", ")", "and ", "or ", "not "]
_conv_txt(qobj, body, seg, dk) kw_val = ["==", "=", "!=", ">", ">=", "<", "<=", "like "]
ptn_mt = re.compile(r"^\.?[a-z]+$")
mt_ctr = 0
mt_keycmp = "substr(up.w,1,16)"
mt_keycmp2 = None
uq, uv = _sqlize(qobj) while True:
uq = uq.strip()
if not uq:
break
qobj = {} ok = False
if "tags" in body: for kw in kw_key + kw_val:
_conv_txt(qobj, body, "tags", "mt.v") if uq.startswith(kw):
is_key = kw in kw_key
uq = uq[len(kw) :]
ok = True
q += kw
break
if "adv" in body: if ok:
_conv_adv(qobj, body, "adv") continue
v, uq = (uq + " ").split(" ", 1)
if is_key:
is_key = False
if v == "size":
v = "up.sz"
is_size = True
elif v == "date":
v = "up.mt"
is_date = True
elif v == "path":
v = "up.rd"
elif v == "name":
v = "up.fn"
elif v == "tags" or ptn_mt.match(v):
mt_ctr += 1
mt_keycmp2 = "mt{}.w".format(mt_ctr)
joins += "inner join mt mt{} on {} = {} ".format(
mt_ctr, mt_keycmp, mt_keycmp2
)
mt_keycmp = mt_keycmp2
if v == "tags":
v = "mt{0}.v".format(mt_ctr)
else:
v = "+mt{0}.k = '{1}' and mt{0}.v".format(mt_ctr, v)
else:
raise Pebkac(400, "invalid key [" + v + "]")
q += v + " "
continue
head = ""
tail = ""
if is_date:
is_date = False
v = v.upper().rstrip("Z").replace(",", " ").replace("T", " ")
while " " in v:
v = v.replace(" ", " ")
for fmt in [
"%Y-%m-%d %H:%M:%S",
"%Y-%m-%d %H:%M",
"%Y-%m-%d %H",
"%Y-%m-%d",
]:
try:
v = datetime.strptime(v, fmt).timestamp()
break
except:
pass
elif is_size:
is_size = False
v = int(float(v) * 1024 * 1024)
else:
if v.startswith("*"):
head = "'%'||"
v = v[1:]
if v.endswith("*"):
tail = "||'%'"
v = v[:-1]
q += " {}?{} ".format(head, tail)
va.append(v)
is_key = True
try: try:
return self.run_query(vols, uq, uv, qobj) return self.run_query(vols, joins + "where " + q, va)
except Exception as ex: except Exception as ex:
raise Pebkac(500, repr(ex)) raise Pebkac(500, repr(ex))
def run_query(self, vols, uq, uv, targs): def run_query(self, vols, uq, uv):
self.log("qs: {} {} , {}".format(uq, repr(uv), repr(targs)))
done_flag = [] done_flag = []
self.active_id = "{:.6f}_{}".format( self.active_id = "{:.6f}_{}".format(
time.time(), threading.current_thread().ident time.time(), threading.current_thread().ident
@@ -112,35 +196,14 @@ class U2idx(object):
thr.daemon = True thr.daemon = True
thr.start() thr.start()
if not targs: if not uq or not uv:
if not uq: q = "select * from up"
q = "select * from up" v = ()
v = ()
else:
q = "select * from up where " + uq
v = tuple(uv)
else: else:
q = "select up.* from up" q = "select up.* from up " + uq
keycmp = "substr(up.w,1,16)" v = tuple(uv)
where = []
v = []
ctr = 0
for tq, tv in sorted(targs.items()):
ctr += 1
tq = tq.split("\n")[0]
keycmp2 = "mt{}.w".format(ctr)
q += " inner join mt mt{} on {} = {}".format(ctr, keycmp, keycmp2)
keycmp = keycmp2
where.append(tq.replace("mt.", keycmp[:-1]))
v.append(tv)
if uq: self.log("qs: {!r} {!r}".format(q, v))
where.append(uq)
v.extend(uv)
q += " where " + (" and ".join(where))
# self.log("q2: {} {}".format(q, repr(v)))
ret = [] ret = []
lim = 1000 lim = 1000
@@ -163,7 +226,7 @@ class U2idx(object):
if rd.startswith("//") or fn.startswith("//"): if rd.startswith("//") or fn.startswith("//"):
rd, fn = s3dec(rd, fn) rd, fn = s3dec(rd, fn)
rp = "/".join([vtop, rd, fn]) rp = "/".join([x for x in [vtop, rd, fn] if x])
sret.append({"ts": int(ts), "sz": sz, "rp": rp, "w": w[:16]}) sret.append({"ts": int(ts), "sz": sz, "rp": rp, "w": w[:16]})
for hit in sret: for hit in sret:
@@ -204,78 +267,3 @@ def _open(ptop):
db_path = os.path.join(ptop, ".hist", "up2k.db") db_path = os.path.join(ptop, ".hist", "up2k.db")
if os.path.exists(db_path): if os.path.exists(db_path):
return sqlite3.connect(db_path).cursor() return sqlite3.connect(db_path).cursor()
def _conv_sz(q, body, k, sql):
if k in body:
q[sql] = int(float(body[k]) * 1024 * 1024)
def _conv_dt(q, body, k, sql):
if k not in body:
return
v = body[k].upper().rstrip("Z").replace(",", " ").replace("T", " ")
while " " in v:
v = v.replace(" ", " ")
for fmt in ["%Y-%m-%d %H:%M:%S", "%Y-%m-%d %H:%M", "%Y-%m-%d %H", "%Y-%m-%d"]:
try:
ts = datetime.strptime(v, fmt).timestamp()
break
except:
ts = None
if ts:
q[sql] = ts
def _conv_txt(q, body, k, sql):
for v in body[k].split(" "):
inv = ""
if v.startswith("-"):
inv = "not"
v = v[1:]
if not v:
continue
head = "'%'||"
if v.startswith("^"):
head = ""
v = v[1:]
tail = "||'%'"
if v.endswith("$"):
tail = ""
v = v[:-1]
qk = "{} {} like {}?{}".format(sql, inv, head, tail)
q[qk + "\n" + v] = u8safe(v)
def _conv_adv(q, body, k):
ptn = re.compile(r"^(\.?[a-z]+) *(==?|!=|<=?|>=?) *(.*)$")
parts = body[k].split(" ")
parts = [x.strip() for x in parts if x.strip()]
for part in parts:
m = ptn.match(part)
if not m:
p = html_escape(part)
raise Pebkac(400, "invalid argument [" + p + "]")
k, op, v = m.groups()
qk = "mt.k = '{}' and mt.v {} ?".format(k, op)
q[qk + "\n" + v] = u8safe(v)
def _sqlize(qobj):
keys = []
values = []
for k, v in sorted(qobj.items()):
keys.append(k.split("\n")[0])
values.append(v)
return " and ".join(keys), values

View File

@@ -52,7 +52,6 @@ class Up2k(object):
self.hub = hub self.hub = hub
self.args = hub.args self.args = hub.args
self.log_func = hub.log self.log_func = hub.log
self.all_vols = all_vols
# config # config
self.salt = self.args.salt self.salt = self.args.salt
@@ -61,12 +60,14 @@ class Up2k(object):
self.mutex = threading.Lock() self.mutex = threading.Lock()
self.hashq = Queue() self.hashq = Queue()
self.tagq = Queue() self.tagq = Queue()
self.volstate = {}
self.registry = {} self.registry = {}
self.entags = {} self.entags = {}
self.flags = {} self.flags = {}
self.cur = {} self.cur = {}
self.mtag = None self.mtag = None
self.pending_tags = None self.pending_tags = None
self.mtp_parsers = {}
self.mem_cur = None self.mem_cur = None
self.sqlite_ver = None self.sqlite_ver = None
@@ -92,7 +93,15 @@ class Up2k(object):
if not HAVE_SQLITE3: if not HAVE_SQLITE3:
self.log("could not initialize sqlite3, will use in-memory registry only") self.log("could not initialize sqlite3, will use in-memory registry only")
have_e2d = self.init_indexes() if self.args.no_fastboot:
self.deferred_init(all_vols)
else:
t = threading.Thread(target=self.deferred_init, args=(all_vols,))
t.daemon = True
t.start()
def deferred_init(self, all_vols):
have_e2d = self.init_indexes(all_vols)
if have_e2d: if have_e2d:
thr = threading.Thread(target=self._snapshot) thr = threading.Thread(target=self._snapshot)
@@ -115,6 +124,19 @@ class Up2k(object):
def log(self, msg, c=0): def log(self, msg, c=0):
self.log_func("up2k", msg + "\033[K", c) self.log_func("up2k", msg + "\033[K", c)
def get_volstate(self):
return json.dumps(self.volstate, indent=4)
def rescan(self, all_vols, scan_vols):
if hasattr(self, "pp"):
return "cannot initiate; scan is already in progress"
args = (all_vols, scan_vols)
t = threading.Thread(target=self.init_indexes, args=args)
t.daemon = True
t.start()
return None
def _vis_job_progress(self, job): def _vis_job_progress(self, job):
perc = 100 - (len(job["need"]) * 100.0 / len(job["hash"])) perc = 100 - (len(job["need"]) * 100.0 / len(job["hash"]))
path = os.path.join(job["ptop"], job["prel"], job["name"]) path = os.path.join(job["ptop"], job["prel"], job["name"])
@@ -137,9 +159,9 @@ class Up2k(object):
return True, ret return True, ret
def init_indexes(self): def init_indexes(self, all_vols, scan_vols=[]):
self.pp = ProgressPrinter() self.pp = ProgressPrinter()
vols = self.all_vols.values() vols = all_vols.values()
t0 = time.time() t0 = time.time()
have_e2d = False have_e2d = False
@@ -159,24 +181,35 @@ class Up2k(object):
for vol in vols: for vol in vols:
try: try:
os.listdir(vol.realpath) os.listdir(vol.realpath)
live_vols.append(vol)
except: except:
self.volstate[vol.vpath] = "OFFLINE (cannot access folder)"
self.log("cannot access " + vol.realpath, c=1) self.log("cannot access " + vol.realpath, c=1)
continue
if not self.register_vpath(vol.realpath, vol.flags):
# self.log("db not enabled for {}".format(m, vol.realpath))
continue
if vol.vpath in scan_vols or not scan_vols:
live_vols.append(vol)
if vol.vpath not in self.volstate:
self.volstate[vol.vpath] = "OFFLINE (pending initialization)"
vols = live_vols vols = live_vols
need_vac = {}
need_mtag = False need_mtag = False
for vol in vols: for vol in vols:
if "e2t" in vol.flags: if "e2t" in vol.flags:
need_mtag = True need_mtag = True
if need_mtag: if need_mtag and not self.mtag:
self.mtag = MTag(self.log_func, self.args) self.mtag = MTag(self.log_func, self.args)
if not self.mtag.usable: if not self.mtag.usable:
self.mtag = None self.mtag = None
# e2ds(a) volumes first, # e2ds(a) volumes first
# also covers tags where e2ts is set
for vol in vols: for vol in vols:
en = {} en = {}
if "mte" in vol.flags: if "mte" in vol.flags:
@@ -188,26 +221,45 @@ class Up2k(object):
have_e2d = True have_e2d = True
if "e2ds" in vol.flags: if "e2ds" in vol.flags:
r = self._build_file_index(vol, vols) self.volstate[vol.vpath] = "busy (hashing files)"
if not r: _, vac = self._build_file_index(vol, list(all_vols.values()))
needed_mutagen = True if vac:
need_vac[vol] = True
if "e2ts" not in vol.flags:
m = "online, idle"
else:
m = "online (tags pending)"
self.volstate[vol.vpath] = m
# open the rest + do any e2ts(a) # open the rest + do any e2ts(a)
needed_mutagen = False needed_mutagen = False
for vol in vols: for vol in vols:
r = self.register_vpath(vol.realpath, vol.flags) if "e2ts" not in vol.flags:
if not r or "e2ts" not in vol.flags:
continue continue
cur, db_path, sz0 = r m = "online (reading tags)"
n_add, n_rm, success = self._build_tags_index(vol.realpath) self.volstate[vol.vpath] = m
self.log("{} [{}]".format(m, vol.realpath))
nadd, nrm, success = self._build_tags_index(vol)
if not success: if not success:
needed_mutagen = True needed_mutagen = True
if n_add or n_rm: if nadd or nrm:
self.vac(cur, db_path, n_add, n_rm, sz0) need_vac[vol] = True
self.volstate[vol.vpath] = "online (mtp soon)"
for vol in need_vac:
cur, _ = self.register_vpath(vol.realpath, vol.flags)
with self.mutex:
cur.connection.commit()
cur.execute("vacuum")
self.pp.end = True self.pp.end = True
msg = "{} volumes in {:.2f} sec" msg = "{} volumes in {:.2f} sec"
self.log(msg.format(len(vols), time.time() - t0)) self.log(msg.format(len(vols), time.time() - t0))
@@ -215,110 +267,104 @@ class Up2k(object):
msg = "could not read tags because no backends are available (mutagen or ffprobe)" msg = "could not read tags because no backends are available (mutagen or ffprobe)"
self.log(msg, c=1) self.log(msg, c=1)
thr = None
if self.mtag:
m = "online (running mtp)"
if scan_vols:
thr = threading.Thread(target=self._run_all_mtp)
thr.daemon = True
else:
del self.pp
m = "online, idle"
for vol in vols:
self.volstate[vol.vpath] = m
if thr:
thr.start()
return have_e2d return have_e2d
def register_vpath(self, ptop, flags): def register_vpath(self, ptop, flags):
with self.mutex: db_path = os.path.join(ptop, ".hist", "up2k.db")
if ptop in self.registry: if ptop in self.registry:
return None return [self.cur[ptop], db_path]
_, flags = self._expr_idx_filter(flags) _, flags = self._expr_idx_filter(flags)
ft = "\033[0;32m{}{:.0}" ft = "\033[0;32m{}{:.0}"
ff = "\033[0;35m{}{:.0}" ff = "\033[0;35m{}{:.0}"
fv = "\033[0;36m{}:\033[1;30m{}" fv = "\033[0;36m{}:\033[1;30m{}"
a = [ a = [
(ft if v is True else ff if v is False else fv).format(k, str(v)) (ft if v is True else ff if v is False else fv).format(k, str(v))
for k, v in flags.items() for k, v in flags.items()
] ]
if a: if a:
self.log(" ".join(sorted(a)) + "\033[0m") self.log(" ".join(sorted(a)) + "\033[0m")
reg = {} reg = {}
path = os.path.join(ptop, ".hist", "up2k.snap") path = os.path.join(ptop, ".hist", "up2k.snap")
if "e2d" in flags and os.path.exists(path): if "e2d" in flags and os.path.exists(path):
with gzip.GzipFile(path, "rb") as f: with gzip.GzipFile(path, "rb") as f:
j = f.read().decode("utf-8") j = f.read().decode("utf-8")
reg2 = json.loads(j) reg2 = json.loads(j)
for k, job in reg2.items(): for k, job in reg2.items():
path = os.path.join(job["ptop"], job["prel"], job["name"]) path = os.path.join(job["ptop"], job["prel"], job["name"])
if os.path.exists(fsenc(path)): if os.path.exists(fsenc(path)):
reg[k] = job reg[k] = job
job["poke"] = time.time() job["poke"] = time.time()
else: else:
self.log("ign deleted file in snap: [{}]".format(path)) self.log("ign deleted file in snap: [{}]".format(path))
m = "loaded snap {} |{}|".format(path, len(reg.keys())) m = "loaded snap {} |{}|".format(path, len(reg.keys()))
m = [m] + self._vis_reg_progress(reg) m = [m] + self._vis_reg_progress(reg)
self.log("\n".join(m)) self.log("\n".join(m))
self.flags[ptop] = flags
self.registry[ptop] = reg
if not HAVE_SQLITE3 or "e2d" not in flags or "d2d" in flags:
return None
try:
os.mkdir(os.path.join(ptop, ".hist"))
except:
pass
db_path = os.path.join(ptop, ".hist", "up2k.db")
if ptop in self.cur:
return None
try:
sz0 = 0
if os.path.exists(db_path):
sz0 = os.path.getsize(db_path) // 1024
cur = self._open_db(db_path)
self.cur[ptop] = cur
return [cur, db_path, sz0]
except:
msg = "cannot use database at [{}]:\n{}"
self.log(msg.format(ptop, traceback.format_exc()))
self.flags[ptop] = flags
self.registry[ptop] = reg
if not HAVE_SQLITE3 or "e2d" not in flags or "d2d" in flags:
return None return None
try:
os.mkdir(os.path.join(ptop, ".hist"))
except:
pass
try:
cur = self._open_db(db_path)
self.cur[ptop] = cur
return [cur, db_path]
except:
msg = "cannot use database at [{}]:\n{}"
self.log(msg.format(ptop, traceback.format_exc()))
return None
def _build_file_index(self, vol, all_vols): def _build_file_index(self, vol, all_vols):
do_vac = False do_vac = False
top = vol.realpath top = vol.realpath
reg = self.register_vpath(top, vol.flags) with self.mutex:
if not reg: cur, _ = self.register_vpath(top, vol.flags)
return
_, db_path, sz0 = reg dbw = [cur, 0, time.time()]
dbw = [reg[0], 0, time.time()] self.pp.n = next(dbw[0].execute("select count(w) from up"))[0]
self.pp.n = next(dbw[0].execute("select count(w) from up"))[0]
excl = [ excl = [
vol.realpath + "/" + d.vpath[len(vol.vpath) :].lstrip("/") vol.realpath + "/" + d.vpath[len(vol.vpath) :].lstrip("/")
for d in all_vols for d in all_vols
if d != vol and (d.vpath.startswith(vol.vpath + "/") or not vol.vpath) if d != vol and (d.vpath.startswith(vol.vpath + "/") or not vol.vpath)
] ]
n_add = self._build_dir(dbw, top, set(excl), top) if WINDOWS:
n_rm = self._drop_lost(dbw[0], top) excl = [x.replace("/", "\\") for x in excl]
if dbw[1]:
self.log("commit {} new files".format(dbw[1]))
dbw[0].connection.commit()
n_add, n_rm, success = self._build_tags_index(vol.realpath) n_add = self._build_dir(dbw, top, set(excl), top)
n_rm = self._drop_lost(dbw[0], top)
if dbw[1]:
self.log("commit {} new files".format(dbw[1]))
dbw[0].connection.commit()
dbw[0].connection.commit() return True, n_add or n_rm or do_vac
if n_add or n_rm or do_vac:
self.vac(dbw[0], db_path, n_add, n_rm, sz0)
return success
def vac(self, cur, db_path, n_add, n_rm, sz0):
sz1 = os.path.getsize(db_path) // 1024
cur.execute("vacuum")
sz2 = os.path.getsize(db_path) // 1024
msg = "{} new, {} del, {} kB vacced, {} kB gain, {} kB now".format(
n_add, n_rm, sz1 - sz2, sz2 - sz0, sz2
)
self.log(msg)
def _build_dir(self, dbw, top, excl, cdir): def _build_dir(self, dbw, top, excl, cdir):
self.pp.msg = "a{} {}".format(self.pp.n, cdir) self.pp.msg = "a{} {}".format(self.pp.n, cdir)
@@ -413,45 +459,53 @@ class Up2k(object):
return len(rm) return len(rm)
def _build_tags_index(self, ptop): def _build_tags_index(self, vol):
entags = self.entags[ptop] ptop = vol.realpath
flags = self.flags[ptop] with self.mutex:
cur = self.cur[ptop] _, db_path = self.register_vpath(ptop, vol.flags)
entags = self.entags[ptop]
flags = self.flags[ptop]
cur = self.cur[ptop]
n_add = 0 n_add = 0
n_rm = 0 n_rm = 0
n_buf = 0 n_buf = 0
last_write = time.time() last_write = time.time()
if "e2tsr" in flags: if "e2tsr" in flags:
n_rm = cur.execute("select count(w) from mt").fetchone()[0] with self.mutex:
if n_rm: n_rm = cur.execute("select count(w) from mt").fetchone()[0]
self.log("discarding {} media tags for a full rescan".format(n_rm)) if n_rm:
cur.execute("delete from mt") self.log("discarding {} media tags for a full rescan".format(n_rm))
else: cur.execute("delete from mt")
self.log("volume has e2tsr but there are no media tags to discard")
# integrity: drop tags for tracks that were deleted # integrity: drop tags for tracks that were deleted
if "e2t" in flags: if "e2t" in flags:
drops = [] with self.mutex:
c2 = cur.connection.cursor() drops = []
up_q = "select w from up where substr(w,1,16) = ?" c2 = cur.connection.cursor()
for (w,) in cur.execute("select w from mt"): up_q = "select w from up where substr(w,1,16) = ?"
if not c2.execute(up_q, (w,)).fetchone(): for (w,) in cur.execute("select w from mt"):
drops.append(w[:16]) if not c2.execute(up_q, (w,)).fetchone():
c2.close() drops.append(w[:16])
c2.close()
if drops: if drops:
msg = "discarding media tags for {} deleted files" msg = "discarding media tags for {} deleted files"
self.log(msg.format(len(drops))) self.log(msg.format(len(drops)))
n_rm += len(drops) n_rm += len(drops)
for w in drops: for w in drops:
cur.execute("delete from mt where w = ?", (w,)) cur.execute("delete from mt where w = ?", (w,))
# bail if a volume flag disables indexing # bail if a volume flag disables indexing
if "d2t" in flags or "d2d" in flags: if "d2t" in flags or "d2d" in flags:
return n_add, n_rm, True return n_add, n_rm, True
# add tags for new files # add tags for new files
gcur = cur
with self.mutex:
gcur.connection.commit()
if "e2ts" in flags: if "e2ts" in flags:
if not self.mtag: if not self.mtag:
return n_add, n_rm, False return n_add, n_rm, False
@@ -460,8 +514,10 @@ class Up2k(object):
if self.mtag.prefer_mt and not self.args.no_mtag_mt: if self.mtag.prefer_mt and not self.args.no_mtag_mt:
mpool = self._start_mpool() mpool = self._start_mpool()
c2 = cur.connection.cursor() conn = sqlite3.connect(db_path, timeout=15)
c3 = cur.connection.cursor() cur = conn.cursor()
c2 = conn.cursor()
c3 = conn.cursor()
n_left = cur.execute("select count(w) from up").fetchone()[0] n_left = cur.execute("select count(w) from up").fetchone()[0]
for w, rd, fn in cur.execute("select w, rd, fn from up"): for w, rd, fn in cur.execute("select w, rd, fn from up"):
n_left -= 1 n_left -= 1
@@ -483,7 +539,8 @@ class Up2k(object):
n_tags = self._tag_file(c3, *args) n_tags = self._tag_file(c3, *args)
else: else:
mpool.put(["mtag"] + args) mpool.put(["mtag"] + args)
n_tags = len(self._flush_mpool(c3)) with self.mutex:
n_tags = len(self._flush_mpool(c3))
n_add += n_tags n_add += n_tags
n_buf += n_tags n_buf += n_tags
@@ -495,26 +552,33 @@ class Up2k(object):
last_write = time.time() last_write = time.time()
n_buf = 0 n_buf = 0
self._stop_mpool(mpool, c3) if mpool:
self._stop_mpool(mpool)
with self.mutex:
n_add += len(self._flush_mpool(c3))
conn.commit()
c3.close() c3.close()
c2.close() c2.close()
cur.close()
conn.close()
with self.mutex:
gcur.connection.commit()
return n_add, n_rm, True return n_add, n_rm, True
def _flush_mpool(self, wcur): def _flush_mpool(self, wcur):
with self.mutex: ret = []
ret = [] for x in self.pending_tags:
for x in self.pending_tags: self._tag_file(wcur, *x)
self._tag_file(wcur, *x) ret.append(x[1])
ret.append(x[1])
self.pending_tags = [] self.pending_tags = []
return ret return ret
def _run_all_mtp(self): def _run_all_mtp(self):
t0 = time.time() t0 = time.time()
self.mtp_parsers = {}
for ptop, flags in self.flags.items(): for ptop, flags in self.flags.items():
if "mtp" in flags: if "mtp" in flags:
self._run_one_mtp(ptop) self._run_one_mtp(ptop)
@@ -523,10 +587,12 @@ class Up2k(object):
msg = "mtp finished in {:.2f} sec ({})" msg = "mtp finished in {:.2f} sec ({})"
self.log(msg.format(td, s2hms(td, True))) self.log(msg.format(td, s2hms(td, True)))
def _run_one_mtp(self, ptop): del self.pp
db_path = os.path.join(ptop, ".hist", "up2k.db") for k in list(self.volstate.keys()):
sz0 = os.path.getsize(db_path) // 1024 if "OFFLINE" not in self.volstate[k]:
self.volstate[k] = "online, idle"
def _run_one_mtp(self, ptop):
entags = self.entags[ptop] entags = self.entags[ptop]
parsers = {} parsers = {}
@@ -585,9 +651,8 @@ class Up2k(object):
jobs.append([parsers, None, w, abspath]) jobs.append([parsers, None, w, abspath])
in_progress[w] = True in_progress[w] = True
done = self._flush_mpool(wcur)
with self.mutex: with self.mutex:
done = self._flush_mpool(wcur)
for w in done: for w in done:
to_delete[w] = True to_delete[w] = True
in_progress.pop(w) in_progress.pop(w)
@@ -628,15 +693,16 @@ class Up2k(object):
with self.mutex: with self.mutex:
cur.connection.commit() cur.connection.commit()
done = self._stop_mpool(mpool, wcur) self._stop_mpool(mpool)
with self.mutex: with self.mutex:
done = self._flush_mpool(wcur)
for w in done: for w in done:
q = "delete from mt where w = ? and k = 't:mtp'" q = "delete from mt where w = ? and k = 't:mtp'"
cur.execute(q, (w,)) cur.execute(q, (w,))
cur.connection.commit() cur.connection.commit()
if n_done: if n_done:
self.vac(cur, db_path, n_done, 0, sz0) cur.execute("vacuum")
wcur.close() wcur.close()
cur.close() cur.close()
@@ -693,7 +759,7 @@ class Up2k(object):
return mpool return mpool
def _stop_mpool(self, mpool, wcur): def _stop_mpool(self, mpool):
if not mpool: if not mpool:
return return
@@ -701,8 +767,6 @@ class Up2k(object):
mpool.put(None) mpool.put(None)
mpool.join() mpool.join()
done = self._flush_mpool(wcur)
return done
def _tag_thr(self, q): def _tag_thr(self, q):
while True: while True:
@@ -1181,12 +1245,15 @@ class Up2k(object):
return wark return wark
def _hashlist_from_file(self, path): def _hashlist_from_file(self, path):
pp = self.pp if hasattr(self, "pp") else None
fsz = os.path.getsize(fsenc(path)) fsz = os.path.getsize(fsenc(path))
csz = up2k_chunksize(fsz) csz = up2k_chunksize(fsz)
ret = [] ret = []
with open(fsenc(path), "rb", 512 * 1024) as f: with open(fsenc(path), "rb", 512 * 1024) as f:
while fsz > 0: while fsz > 0:
self.pp.msg = "{} MB, {}".format(int(fsz / 1024 / 1024), path) if pp:
pp.msg = "{} MB, {}".format(int(fsz / 1024 / 1024), path)
hashobj = hashlib.sha512() hashobj = hashlib.sha512()
rem = min(csz, fsz) rem = min(csz, fsz)
fsz -= rem fsz -= rem

View File

@@ -529,6 +529,17 @@ input[type="checkbox"]:checked+label {
height: 1em; height: 1em;
margin: .2em 0 -1em 1.6em; margin: .2em 0 -1em 1.6em;
} }
#tq_raw {
width: calc(100% - 2em);
margin: .3em 0 0 1.4em;
}
#tq_raw td+td {
width: 100%;
}
#op_search #q_raw {
width: 100%;
display: block;
}
#files td div span { #files td div span {
color: #fff; color: #fff;
padding: 0 .4em; padding: 0 .4em;

View File

@@ -803,7 +803,10 @@ var thegrid = (function () {
r.sz = v; r.sz = v;
swrite('gridsz', r.sz); swrite('gridsz', r.sz);
} }
document.documentElement.style.setProperty('--grid-sz', r.sz + 'em'); try {
document.documentElement.style.setProperty('--grid-sz', r.sz + 'em');
}
catch (ex) { }
} }
setsz(); setsz();
@@ -820,12 +823,25 @@ var thegrid = (function () {
this.setAttribute('class', tr.getAttribute('class')); this.setAttribute('class', tr.getAttribute('class'));
} }
function bgopen(e) {
ev(e);
var url = this.getAttribute('href');
window.open(url, '_blank');
}
r.loadsel = function () { r.loadsel = function () {
var ths = QSA('#ggrid>a'); var ths = QSA('#ggrid>a'),
have_sel = !!QS('#files tr.sel');
for (var a = 0, aa = ths.length; a < aa; a++) { for (var a = 0, aa = ths.length; a < aa; a++) {
ths[a].onclick = r.sel ? seltgl : null; ths[a].onclick = r.sel ? seltgl : have_sel ? bgopen : null;
ths[a].setAttribute('class', ebi(ths[a].getAttribute('ref')).parentNode.parentNode.getAttribute('class')); ths[a].setAttribute('class', ebi(ths[a].getAttribute('ref')).parentNode.parentNode.getAttribute('class'));
} }
var uns = QS('#ggrid a[ref="unsearch"]');
if (uns)
uns.onclick = function () {
ebi('unsearch').click();
};
} }
function loadgrid() { function loadgrid() {
@@ -836,9 +852,9 @@ var thegrid = (function () {
return r.loadsel(); return r.loadsel();
var html = []; var html = [];
var tr = lfiles.tBodies[0].rows; var files = QSA('#files>tbody>tr>td:nth-child(2) a[id]');
for (var a = 0; a < tr.length; a++) { for (var a = 0, aa = files.length; a < aa; a++) {
var ao = tr[a].cells[1].firstChild, var ao = files[a],
href = esc(ao.getAttribute('href')), href = esc(ao.getAttribute('href')),
ref = ao.getAttribute('id'), ref = ao.getAttribute('id'),
isdir = href.split('?')[0].slice(-1)[0] == '/', isdir = href.split('?')[0].slice(-1)[0] == '/',
@@ -960,7 +976,7 @@ document.onkeydown = function (e) {
if (k == 'KeyT') if (k == 'KeyT')
return ebi('thumbs').click(); return ebi('thumbs').click();
if (window['thegrid'] && thegrid.en) { if (thegrid.en) {
if (k == 'KeyS') if (k == 'KeyS')
return ebi('gridsel').click(); return ebi('gridsel').click();
@@ -1026,6 +1042,7 @@ document.onkeydown = function (e) {
for (var a = 0; a < trs.length; a += 2) { for (var a = 0; a < trs.length; a += 2) {
html.push('<table>' + (trs[a].concat(trs[a + 1])).join('\n') + '</table>'); html.push('<table>' + (trs[a].concat(trs[a + 1])).join('\n') + '</table>');
} }
html.push('<table id="tq_raw"><tr><td>raw</td><td><input id="q_raw" type="text" name="q" /></td></tr></table>');
ebi('srch_form').innerHTML = html.join('\n'); ebi('srch_form').innerHTML = html.join('\n');
var o = QSA('#op_search input'); var o = QSA('#op_search input');
@@ -1050,33 +1067,83 @@ document.onkeydown = function (e) {
var chk = ebi(id.slice(0, -1) + 'c'); var chk = ebi(id.slice(0, -1) + 'c');
chk.checked = ((v + '').length > 0); chk.checked = ((v + '').length > 0);
} }
if (id != "q_raw")
encode_query();
clearTimeout(search_timeout); clearTimeout(search_timeout);
if (Date.now() - search_in_progress > 30 * 1000) if (Date.now() - search_in_progress > 30 * 1000)
search_timeout = setTimeout(do_search, 200); search_timeout = setTimeout(do_search, 200);
} }
function encode_query() {
var q = '';
for (var a = 0; a < sconf.length; a++) {
for (var b = 1; b < sconf[a].length; b++) {
var k = sconf[a][b][0],
chk = 'srch_' + k + 'c',
tvs = ebi('srch_' + k + 'v').value.split(/ /g);
if (!ebi(chk).checked)
continue;
for (var c = 0; c < tvs.length; c++) {
var tv = tvs[c];
if (!tv.length)
break;
q += ' and ';
if (k == 'adv') {
q += tv.replace(/ /g, " and ").replace(/([=!><]=?)/, " $1 ");
continue;
}
if (k.length == 3) {
q += k.replace(/sz/, 'size').replace(/dt/, 'date').replace(/l$/, ' >= ').replace(/u$/, ' <= ') + tv;
continue;
}
if (k == 'path' || k == 'name' || k == 'tags') {
var not = ' ';
if (tv.slice(0, 1) == '-') {
tv = tv.slice(1);
not = ' not ';
}
if (tv.slice(0, 1) == '^') {
tv = tv.slice(1);
}
else {
tv = '*' + tv;
}
if (tv.slice(-1) == '$') {
tv = tv.slice(0, -1);
}
else {
tv += '*';
}
q += k + not + 'like ' + tv;
}
}
}
}
ebi('q_raw').value = q.slice(5);
}
function do_search() { function do_search() {
search_in_progress = Date.now(); search_in_progress = Date.now();
srch_msg(false, "searching..."); srch_msg(false, "searching...");
clearTimeout(search_timeout); clearTimeout(search_timeout);
var params = {},
o = QSA('#op_search input[type="text"]');
for (var a = 0; a < o.length; a++) {
var chk = ebi(o[a].getAttribute('id').slice(0, -1) + 'c');
if (!chk.checked)
continue;
params[o[a].getAttribute('name')] = o[a].value;
}
// ebi('srch_q').textContent = JSON.stringify(params, null, 4);
var xhr = new XMLHttpRequest(); var xhr = new XMLHttpRequest();
xhr.open('POST', '/?srch', true); xhr.open('POST', '/?srch', true);
xhr.setRequestHeader('Content-Type', 'text/plain'); xhr.setRequestHeader('Content-Type', 'text/plain');
xhr.onreadystatechange = xhr_search_results; xhr.onreadystatechange = xhr_search_results;
xhr.ts = Date.now(); xhr.ts = Date.now();
xhr.send(JSON.stringify(params)); xhr.send(JSON.stringify({ "q": ebi('q_raw').value }));
} }
function xhr_search_results() { function xhr_search_results() {
@@ -1381,7 +1448,7 @@ var treectl = (function () {
if (hpush) if (hpush)
get_tree('.', xhr.top); get_tree('.', xhr.top);
enspin('#files'); enspin(thegrid.en ? '#gfiles' : '#files');
} }
function treegrow(e) { function treegrow(e) {
@@ -1461,6 +1528,7 @@ var treectl = (function () {
apply_perms(res.perms); apply_perms(res.perms);
despin('#files'); despin('#files');
despin('#gfiles');
ebi('pro').innerHTML = res.logues ? res.logues[0] || "" : ""; ebi('pro').innerHTML = res.logues ? res.logues[0] || "" : "";
ebi('epi').innerHTML = res.logues ? res.logues[1] || "" : ""; ebi('epi').innerHTML = res.logues ? res.logues[1] || "" : "";
@@ -1995,8 +2063,7 @@ var msel = (function () {
} }
function selui() { function selui() {
clmod(ebi('wtoggle'), 'sel', getsel().length); clmod(ebi('wtoggle'), 'sel', getsel().length);
if (window['thegrid']) thegrid.loadsel();
thegrid.loadsel();
} }
function seltgl(e) { function seltgl(e) {
ev(e); ev(e);

View File

@@ -26,6 +26,13 @@ a {
border-radius: .2em; border-radius: .2em;
padding: .2em .8em; padding: .2em .8em;
} }
td, th {
padding: .3em .6em;
text-align: left;
}
.btns {
margin: 1em 0;
}
html.dark, html.dark,

View File

@@ -13,11 +13,28 @@
<div id="wrap"> <div id="wrap">
<p>hello {{ this.uname }}</p> <p>hello {{ this.uname }}</p>
{%- if avol %}
<h1>admin panel:</h1>
<table>
<thead><tr><th>vol</th><th>action</th><th>status</th></tr></thead>
<tbody>
{% for mp in avol %}
{%- if mp in vstate and vstate[mp] %}
<tr><td><a href="{{ mp }}{{ url_suf }}">{{ mp }}</a></td><td><a href="{{ mp }}?scan">rescan</a></td><td>{{ vstate[mp] }}</td></tr>
{%- endif %}
{% endfor %}
</tbody>
</table>
<div class="btns">
<a href="{{ avol[0] }}?stack">dump stack</a>
</div>
{%- endif %}
{%- if rvol %} {%- if rvol %}
<h1>you can browse these:</h1> <h1>you can browse these:</h1>
<ul> <ul>
{% for mp in rvol %} {% for mp in rvol %}
<li><a href="/{{ mp }}{{ url_suf }}">/{{ mp }}</a></li> <li><a href="{{ mp }}{{ url_suf }}">{{ mp }}</a></li>
{% endfor %} {% endfor %}
</ul> </ul>
{%- endif %} {%- endif %}
@@ -26,7 +43,7 @@
<h1>you can upload to:</h1> <h1>you can upload to:</h1>
<ul> <ul>
{% for mp in wvol %} {% for mp in wvol %}
<li><a href="/{{ mp }}{{ url_suf }}">/{{ mp }}</a></li> <li><a href="{{ mp }}{{ url_suf }}">{{ mp }}</a></li>
{% endfor %} {% endfor %}
</ul> </ul>
{%- endif %} {%- endif %}

View File

@@ -17,6 +17,7 @@ function goto_up2k() {
// chrome requires https to use crypto.subtle, // chrome requires https to use crypto.subtle,
// usually it's undefined but some chromes throw on invoke // usually it's undefined but some chromes throw on invoke
var up2k = null; var up2k = null;
var sha_js = window.WebAssembly ? 'hw' : 'ac'; // ff53,c57,sa11
try { try {
var cf = crypto.subtle || crypto.webkitSubtle; var cf = crypto.subtle || crypto.webkitSubtle;
cf.digest('SHA-512', new Uint8Array(1)).then( cf.digest('SHA-512', new Uint8Array(1)).then(
@@ -430,13 +431,15 @@ function up2k_init(subtle) {
// upload ui hidden by default, clicking the header shows it // upload ui hidden by default, clicking the header shows it
function init_deps() { function init_deps() {
if (!subtle && !window.asmCrypto) { if (!subtle && !window.asmCrypto) {
showmodal('<h1>loading sha512.js</h1><h2>since ' + shame + '</h2><h4>thanks chrome</h4>'); var fn = 'sha512.' + sha_js + '.js';
import_js('/.cpr/deps/sha512.js', unmodal); showmodal('<h1>loading ' + fn + '</h1><h2>since ' + shame + '</h2><h4>thanks chrome</h4>');
import_js('/.cpr/deps/' + fn, unmodal);
if (is_https) if (is_https)
ebi('u2foot').innerHTML = shame + ' so <em>this</em> uploader will do like 500kB/s at best'; ebi('u2foot').innerHTML = shame + ' so <em>this</em> uploader will do like 500kB/s at best';
else else
ebi('u2foot').innerHTML = 'seems like ' + shame + ' so do that if you want more performance'; ebi('u2foot').innerHTML = 'seems like ' + shame + ' so do that if you want more performance <span style="color:#' +
(sha_js == 'ac' ? 'c84">(expecting 20' : '8a5">(but dont worry too much, expect 100') + ' MiB/s)</span>';
} }
} }
@@ -886,6 +889,10 @@ function up2k_init(subtle) {
return base64; return base64;
} }
function hex2u8(txt) {
return new Uint8Array(txt.match(/.{2}/g).map(function (b) { return parseInt(b, 16); }));
}
function get_chunksize(filesize) { function get_chunksize(filesize) {
var chunksize = 1024 * 1024, var chunksize = 1024 * 1024,
stepsize = 512 * 1024; stepsize = 512 * 1024;
@@ -987,10 +994,18 @@ function up2k_init(subtle) {
if (subtle) if (subtle)
subtle.digest('SHA-512', buf).then(hash_done); subtle.digest('SHA-512', buf).then(hash_done);
else setTimeout(function () { else setTimeout(function () {
var hasher = new asmCrypto.Sha512(); var u8buf = new Uint8Array(buf);
hasher.process(new Uint8Array(buf)); if (sha_js == 'hw') {
hasher.finish(); hashwasm.sha512(u8buf).then(function (v) {
hash_done(hasher.result); hash_done(hex2u8(v))
});
}
else {
var hasher = new asmCrypto.Sha512();
hasher.process(u8buf);
hasher.finish();
hash_done(hasher.result);
}
}, 1); }, 1);
}; };

View File

@@ -238,6 +238,10 @@
color: #fff; color: #fff;
font-style: italic; font-style: italic;
} }
#u2foot span {
color: #999;
font-size: .9em;
}
#u2footfoot { #u2footfoot {
margin-bottom: -1em; margin-bottom: -1em;
} }

View File

@@ -80,6 +80,13 @@ command -v gdate && date() { gdate "$@"; }; while true; do t=$(date +%s.%N); (ti
var t=[]; var b=document.location.href.split('#')[0].slice(0, -1); document.querySelectorAll('#u2tab .prog a').forEach((x) => {t.push(b+encodeURI(x.getAttribute("href")))}); console.log(t.join("\n")); var t=[]; var b=document.location.href.split('#')[0].slice(0, -1); document.querySelectorAll('#u2tab .prog a').forEach((x) => {t.push(b+encodeURI(x.getAttribute("href")))}); console.log(t.join("\n"));
##
## bash oneliners
# get the size and video-id of all youtube vids in folder, assuming filename ends with -id.ext, and create a copyparty search query
find -maxdepth 1 -printf '%s %p\n' | sort -n | awk '!/-([0-9a-zA-Z_-]{11})\.(mkv|mp4|webm)$/{next} {sub(/\.[^\.]+$/,"");n=length($0);v=substr($0,n-10);print $1, v}' | tee /dev/stderr | awk 'BEGIN {p="("} {printf("%s name like *-%s.* ",p,$2);p="or"} END {print ")\n"}' | cat >&2
## ##
## sqlite3 stuff ## sqlite3 stuff

View File

@@ -9,6 +9,12 @@ ENV ver_asmcrypto=5b994303a9d3e27e0915f72a10b6c2c51535a4dc \
ver_zopfli=1.0.3 ver_zopfli=1.0.3
# TODO
# sha512.hw.js https://github.com/Daninet/hash-wasm
# sha512.kc.js https://github.com/chm-diederichs/sha3-wasm
# awk '/HMAC state/{o=1} /var HEAP/{o=0} /function hmac_reset/{o=1} /return \{/{o=0} /var __extends =/{o=1} /var Hash =/{o=0} /hmac_|pbkdf2_/{next} o{next} {gsub(/IllegalStateError/,"Exception")} {sub(/^ +/,"");sub(/^\/\/ .*/,"");sub(/;$/," ;")} 1' <sha512.ac.js.orig >sha512.ac.js; for fn in sha512.ac.js.orig sha512.ac.js; do wc -c <$fn; wc -c <$fn.gz ; for n in {1..9}; do printf '%8d %d bz\n' $(bzip2 -c$n <$fn | wc -c) $n; done; done
# download; # download;
# the scp url is latin from https://fonts.googleapis.com/css2?family=Source+Code+Pro&display=swap # the scp url is latin from https://fonts.googleapis.com/css2?family=Source+Code+Pro&display=swap
RUN mkdir -p /z/dist/no-pk \ RUN mkdir -p /z/dist/no-pk \

12
scripts/install-githooks.sh Executable file
View File

@@ -0,0 +1,12 @@
#!/bin/bash
set -ex

# installs run-tests.sh as a pre-commit hook;
# must be run from the repo root (or from scripts/, whence we hop up one level)
[ -e setup.py ] || cd ..
[ -e setup.py ] || {
	echo u wot
	exit 1
}

cd .git/hooks
rm -f pre-commit
ln -s ../../scripts/run-tests.sh pre-commit

34
scripts/profile.py Normal file
View File

@@ -0,0 +1,34 @@
#!/usr/bin/env python3
"""profiling entrypoint; `./scripts/profile.py cpp` profiles the server,
`./scripts/profile.py test` profiles the unittest suite
(see the vmprof notes at the bottom of this file)
"""
import sys

sys.path.insert(0, ".")  # run from the repo root so copyparty/tests resolve


def main():
    """Dispatch on argv[1]; raises SystemExit with usage info on bad args."""
    # guard argv explicitly: a clear usage message beats an IndexError
    # or a bare `raise Exception()`
    if len(sys.argv) < 2 or sys.argv[1] not in ("cpp", "test"):
        raise SystemExit("usage: profile.py {cpp|test}")

    if sys.argv[1] == "cpp":
        # profile the actual server with a couple of read-only volumes
        from copyparty.__main__ import main as cpp_main

        cpp_main(argv=["__main__", "-v", "srv::r", "-v", "../../yt:yt:r"])
    else:
        # profile the test suite
        from unittest import main as ut_main

        ut_main(module=None, argv=["__main__", "discover", "-s", "tests"])


if __name__ == "__main__":
    main()


# import dis; print(dis.dis(main))

# macos:
#   option1) python3.9 -m pip install --user -U vmprof==0.4.9
#   option2) python3.9 -m pip install --user -U https://github.com/vmprof/vmprof-python/archive/refs/heads/master.zip
#
# python -m vmprof -o prof --lines ./scripts/profile.py test
# linux: ~/.local/bin/vmprofshow prof tree | grep -vF '[1m 0.'
# macos: ~/Library/Python/3.9/bin/vmprofshow prof tree | grep -vF '[1m 0.'
# win:   %appdata%\..\Roaming\Python\Python39\Scripts\vmprofshow.exe prof tree

12
scripts/run-tests.sh Executable file
View File

@@ -0,0 +1,12 @@
#!/bin/bash
set -ex

# run the test suite under python2 and python3 in parallel;
# `set -e` makes the script abort (nonzero exit, blocking the commit when
# used as a pre-commit hook) as soon as a failed run is wait'ed on
pids=()
for py in python{2,3}; do
	$py -m unittest discover -s tests >/dev/null &
	pids+=($!)
done

# quote the array expansion so pids survive word-splitting (SC2068)
for pid in "${pids[@]}"; do
	wait $pid
done

View File

@@ -8,13 +8,13 @@ import time
import shutil import shutil
import pprint import pprint
import tarfile import tarfile
import tempfile
import unittest import unittest
from argparse import Namespace from argparse import Namespace
from copyparty.authsrv import AuthSrv
from copyparty.httpcli import HttpCli
from tests import util as tu from tests import util as tu
from copyparty.authsrv import AuthSrv
from copyparty.httpcli import HttpCli
def hdr(query): def hdr(query):
@@ -32,6 +32,8 @@ class Cfg(Namespace):
no_zip=False, no_zip=False,
no_scandir=False, no_scandir=False,
no_sendfile=True, no_sendfile=True,
no_rescan=True,
ihead=False,
nih=True, nih=True,
mtp=[], mtp=[],
mte="a", mte="a",
@@ -40,13 +42,15 @@ class Cfg(Namespace):
class TestHttpCli(unittest.TestCase): class TestHttpCli(unittest.TestCase):
def test(self): def setUp(self):
td = os.path.join(tu.get_ramdisk(), "vfs") self.td = tu.get_ramdisk()
try:
shutil.rmtree(td)
except OSError:
pass
def tearDown(self):
os.chdir(tempfile.gettempdir())
shutil.rmtree(self.td)
def test(self):
td = os.path.join(self.td, "vfs")
os.mkdir(td) os.mkdir(td)
os.chdir(td) os.chdir(td)

View File

@@ -7,13 +7,12 @@ import json
import shutil import shutil
import tempfile import tempfile
import unittest import unittest
from textwrap import dedent from textwrap import dedent
from argparse import Namespace from argparse import Namespace
from copyparty.authsrv import AuthSrv
from copyparty import util
from tests import util as tu from tests import util as tu
from copyparty.authsrv import AuthSrv
from copyparty import util
class Cfg(Namespace): class Cfg(Namespace):
@@ -25,6 +24,13 @@ class Cfg(Namespace):
class TestVFS(unittest.TestCase): class TestVFS(unittest.TestCase):
def setUp(self):
self.td = tu.get_ramdisk()
def tearDown(self):
os.chdir(tempfile.gettempdir())
shutil.rmtree(self.td)
def dump(self, vfs): def dump(self, vfs):
print(json.dumps(vfs, indent=4, sort_keys=True, default=lambda o: o.__dict__)) print(json.dumps(vfs, indent=4, sort_keys=True, default=lambda o: o.__dict__))
@@ -55,12 +61,7 @@ class TestVFS(unittest.TestCase):
pass pass
def test(self): def test(self):
td = os.path.join(tu.get_ramdisk(), "vfs") td = os.path.join(self.td, "vfs")
try:
shutil.rmtree(td)
except OSError:
pass
os.mkdir(td) os.mkdir(td)
os.chdir(td) os.chdir(td)
@@ -227,7 +228,7 @@ class TestVFS(unittest.TestCase):
self.assertEqual(list(v1), list(v2)) self.assertEqual(list(v1), list(v2))
# config file parser # config file parser
cfg_path = os.path.join(tu.get_ramdisk(), "test.cfg") cfg_path = os.path.join(self.td, "test.cfg")
with open(cfg_path, "wb") as f: with open(cfg_path, "wb") as f:
f.write( f.write(
dedent( dedent(
@@ -260,6 +261,4 @@ class TestVFS(unittest.TestCase):
self.assertEqual(n.uwrite, ["asd"]) self.assertEqual(n.uwrite, ["asd"])
self.assertEqual(len(n.nodes), 0) self.assertEqual(len(n.nodes), 0)
os.chdir(tempfile.gettempdir())
shutil.rmtree(td)
os.unlink(cfg_path) os.unlink(cfg_path)

View File

@@ -1,16 +1,36 @@
import os import os
import sys
import time import time
import shutil
import jinja2 import jinja2
import tempfile import tempfile
import platform
import subprocess as sp import subprocess as sp
from copyparty.util import Unrecv
WINDOWS = platform.system() == "Windows"
ANYWIN = WINDOWS or sys.platform in ["msys"]
MACOS = platform.system() == "Darwin"
J2_ENV = jinja2.Environment(loader=jinja2.BaseLoader) J2_ENV = jinja2.Environment(loader=jinja2.BaseLoader)
J2_FILES = J2_ENV.from_string("{{ files|join('\n') }}") J2_FILES = J2_ENV.from_string("{{ files|join('\n') }}")
def nah(*a, **ka):
return False
if MACOS:
import posixpath
posixpath.islink = nah
os.path.islink = nah
# 25% faster; until any tests do symlink stuff
from copyparty.util import Unrecv
def runcmd(*argv): def runcmd(*argv):
p = sp.Popen(argv, stdout=sp.PIPE, stderr=sp.PIPE) p = sp.Popen(argv, stdout=sp.PIPE, stderr=sp.PIPE)
stdout, stderr = p.communicate() stdout, stderr = p.communicate()
@@ -28,18 +48,25 @@ def chkcmd(*argv):
def get_ramdisk(): def get_ramdisk():
def subdir(top):
ret = os.path.join(top, "cptd-{}".format(os.getpid()))
shutil.rmtree(ret, True)
os.mkdir(ret)
return ret
for vol in ["/dev/shm", "/Volumes/cptd"]: # nosec (singleton test) for vol in ["/dev/shm", "/Volumes/cptd"]: # nosec (singleton test)
if os.path.exists(vol): if os.path.exists(vol):
return vol return subdir(vol)
if os.path.exists("/Volumes"): if os.path.exists("/Volumes"):
devname, _ = chkcmd("hdiutil", "attach", "-nomount", "ram://32768") # hdiutil eject /Volumes/cptd/
devname, _ = chkcmd("hdiutil", "attach", "-nomount", "ram://65536")
devname = devname.strip() devname = devname.strip()
print("devname: [{}]".format(devname)) print("devname: [{}]".format(devname))
for _ in range(10): for _ in range(10):
try: try:
_, _ = chkcmd("diskutil", "eraseVolume", "HFS+", "cptd", devname) _, _ = chkcmd("diskutil", "eraseVolume", "HFS+", "cptd", devname)
return "/Volumes/cptd" return subdir("/Volumes/cptd")
except Exception as ex: except Exception as ex:
print(repr(ex)) print(repr(ex))
time.sleep(0.25) time.sleep(0.25)
@@ -50,7 +77,7 @@ def get_ramdisk():
try: try:
os.mkdir(ret) os.mkdir(ret)
finally: finally:
return ret return subdir(ret)
class NullBroker(object): class NullBroker(object):
@@ -91,7 +118,10 @@ class VHttpConn(object):
self.auth = auth self.auth = auth
self.log_func = log self.log_func = log
self.log_src = "a" self.log_src = "a"
self.lf_url = None
self.hsrv = VHttpSrv() self.hsrv = VHttpSrv()
self.nbyte = 0 self.nbyte = 0
self.workload = 0 self.workload = 0
self.ico = None
self.thumbcli = None
self.t0 = time.time() self.t0 = time.time()