ycl

Artifact [2a668652b7]
Login

Artifact [2a668652b7]

Artifact 2a668652b72a3131fbab569148cfdbcb63c03e84:


#! /usr/bin/env tclsh

package require pki
package require sha256
package require sqlite3

package require {ycl proc}
namespace import [yclprefix]::proc::import

package require {ycl bits struct}
import [yclprefix]::bits
package require {ycl db sqlite util}
[yclprefix] proc alias dbget_ [yclprefix]::db::sqlite::util::get
package require {ycl dir}
namespace import [yclprefix]::dir::mktempdir
namespace import [yclprefix]::dir::noencoding
package require {ycl comm ucsd}
[yclprefix] proc alias ucsd [yclprefix]::comm::ucsd
package require {ycl dir scan}
package require {ycl keep keep}
package require {ycl math rand}
interp alias {} [namespace current]::randbytes {} [
	yclprefix]::math::rand::randbytes
package require {ycl string cdc}
[yclprefix] proc alias cut [yclprefix]::string::cdc::cut
[yclprefix] proc alias cuts [yclprefix]::string::cdc::cuts
[yclprefix] proc alias signature_sha256 \
	[yclprefix]::string::cdc::signature_sha256
package require {ycl string printable}
[yclprefix] proc alias printable [yclprefix]::string::printable


proc .id {_} {
	# Return this instance's unique identifier, recorded in the system table
	# when the database was first created (see dbsetup).
	return [$_ dbget {select v from system where e = 0 and a = 'id'}]
}
.my .method .id

proc .init {_ args} {
	# Constructor.  Options:
	#     workdir DIR -- set and initialize the work directory immediately
	#     cmd NAME ?ARG ...? -- after setup, dispatch NAME with the remaining
	#         arguments via [cmdline]; consumes the rest of $args
	# Returns the instance, unless "cmd" tailcalls into cmdline.
	$_ .vars keepdir port fsdb fsdir workdir
	while {[llength $args]} {
		# [set args {}] unshares $args so lassign can rebuild it in place
		set args [lassign $args[set args {}] opt val]
		switch $opt {
			cmd {
				# every remaining argument belongs to the command
				set cmd [list $val {*}$args]
				break
			}
			workdir {
				set $opt $val
			}
			default {
				error [list {unknown option} $opt]
			}
		}
	}

	# "repo", "scan" and "remote" are ensemble methods whose subcommands map
	# onto plain procs in this namespace
	namespace ensemble create -command [
		$_ .namespace]::repo -parameters _ -map {
		add repo_add
	}
	$_ .eval [list $_ .method repo]

	namespace ensemble create -command [
		$_ .namespace]::scan -parameters _ -map {
			list scan_list
			run scan_run
		}
	$_ .eval [list $_ .method scan]

	namespace ensemble create -command [
		$_ .namespace]::remote -parameters _ -map {
		add remote_add
		list remote_list
		push remote_push
	}
	$_ .eval [list $_ .method remote]

	if {[info exists workdir]} {
		$_ workdir $workdir
	}
	# default port for listen/push
	set port 7480

	if {[info exists cmd]}  {
		tailcall $_ cmdline {*}$cmd
	}
	return $_
}
.my .method .init


proc archive {_ epoch} {
	# Store into the keep the content of every file chunk recorded for scan
	# epoch $epoch, skipping chunks the keep already holds.  Returns
	#     {bytesarchived bytesexisting bytesstorefailed openfailures
	#      bytesreadfailed}
	set chan {}
	set currentpath {}
	set archived 0
	# flush the batch once it holds this many list elements (hash/chunk
	# pairs); must be an even number
	set batchlimit 2048
	set existed 0
	set openfailure 0
	set readfailure 0
	set storefailure 0
	set complete 0
	set batch {}
	# bytes accumulated in the current batch
	set batchsize 0
	# script (run via [try]) that flushes $batch, retrying transient store
	# failures with a pause between attempts
	set storebatch {
		while 1 {
			try {
				$_ keep storebatch $batch
			} on error {tres topts} {
				puts stderr [printable [dict get $topts -errorinfo]]
			} on ok {} {
				incr archived $batchsize 
				break
			}
			if {[incr failures] >= 100} {
				incr storefailure $batchsize
				error [list {too many failures to store} \
					failures $failures]
			}
			# rest before trying again
			after 1000
		}
		set failures 0
		set batch {}
		set batchsize 0
	}
	# this query relies on successive cuts offsets being in sequential order in
	# the cuts table
	set query {
		select fs.fileset.rowid as fileid
			, fs.fileset.size as size
			, fs.hashes.rowid as hashrowid
			, fs.paths.path as path
			, cuts1.offset as start
			, cuts1.hash as hash
			, coalesce(cuts2.offset, 'end') as next
		from fs.fileset
		join fs.paths on fs.fileset.path = fs.paths.rowid
		join fs.hashes on fs.fileset.rowid = fs.hashes.fileid 
		join fs.cuts as cuts1 on fs.hashes.streamid = cuts1.streamid
		left join fs.cuts as cuts2 on cuts2.rowid = (
			select rowid from fs.cuts
			where fs.cuts.streamid = cuts1.streamid
			and fs.cuts.offset > cuts1.offset
			limit 1
		)
		where fs.fileset.epoch = $epoch
		order by path ,cuts1.streamid ,cuts1.offset
	}
	$_ db eval $query {
		if {[$_ keep exists $hash]} {
			puts [list {already archived} file $path \
				id $fileid start $start end $next]
			incr existed $size
			continue
		}
		# 'end' (from the coalesce above) marks the final chunk of a stream
		set end2 [expr {$next eq {end}? $size - 1 : $next - 1}]
		puts [list archiving {percent complete} [
				expr {entier(($size > 0 ? double($end2) / $size : 1) * 100)}] \
			id $fileid \
			start $start \
			end $end2 \
			file $path \
		]
		if {$currentpath ne $path} {
			# moving on to a new file; drop the previous channel, if any
			catch {close $chan}
			try {
				noencoding {
					set chan [open $path]
				}
			} on error {} {
				puts [list {could not open path} $path]
				set chan {}
				incr openfailure
				continue
			} on ok {} {
				chan configure $chan -translation binary
			}
			set currentpath $path
		}

		if {$chan eq {}} {
			# the file could not be opened; count its bytes as failed
			incr storefailure $size
		} else {
			seek $chan $start start
			if {$next eq {end}} {
				set chunk [read $chan]
			} else {
				set chunk [read $chan [expr {$next - $start}]]
			}

			if {[$_ hash $chunk] eq $hash} {
				lappend batch $hash $chunk
				incr batchsize $size
				# fixed: compare the element count against a dedicated limit;
				# the original clobbered the 2048 limit with the byte counter
				# (set batchsize 0) and then compared llength against bytes,
				# so the batch effectively never flushed inside the loop
				if {[llength $batch] >= $batchlimit} {
					try $storebatch
				}
			} else {
				# the file changed on disk since it was scanned
				puts stderr [printable [list {chunk signatures do not match} \
					file $currentpath offset $start]]
				incr readfailure $size

			}
		}
	}
	if {[llength $batch]} {
		try $storebatch
	}
	return [list $archived $existed $storefailure $openfailure $readfailure]
}
.my .method archive


proc archivefsdb {_} {
	# Back up the filesystem-scan database into the keep.  Idempotent per
	# (filesystem, fs epoch): reuses the recorded hash when this epoch was
	# already backed up.  Returns the hash of the stored backup artifact.
	variable scandbheadermagic
	$_ .vars fsdir selfbackupdir
	set bheadermagic [binary format H* $scandbheadermagic]
	# temporary scanner instance; [info cmdcount] makes the name unique
	set fs [$_ .namespace]::[info cmdcount]_scan
	try {
		[yclprefix] dir scan .spawn $fs 
		$fs init workdir $fsdir

		set fsid [$fs .id]
		if {[string length $fsid] == 0} {
			error [list {bad fsid}]
		}
		$_ fsdb_register $fsid

		set fsepoch [$_ dbget {
			select v from fs.system where e = 0 and a = 'epoch'
		}]

		set fsrowid [$_ dbget {
			select rowid from filesystems where fsid = @fsid
		}]

		if {[$_ db exists {
			select 1 from fsbackups
				where fsrowid = $fsrowid and fsepoch = $fsepoch}]
		} {
			# already backed up at this epoch; reuse the recorded hash
			set hash [$_ db onecolumn {
				select hash from fsbackups
				where fsrowid = $fsrowid and fsepoch = $fsepoch
			}]
		} else {
			#$_ db eval {vacuum fs}
			set backupfile $selfbackupdir/scansbackup
			$_ db backup fs $backupfile
			set hash [$_ storeartifact $backupfile]

			# header: binary magic, NUL, then fsid plus the encoded epoch
			set header $bheadermagic\0[
				bits struct encode values $fsid [
					bits number encode $fsepoch]]
			$_ keep store $header [bits struct encode value $hash]
			$_ db eval {
				insert into fsbackups values (
					null
					, $fsrowid
					, $fsepoch
					, @hash
				)
			}
			file delete $backupfile
		}
	} finally {
		# always dispose of the temporary scanner instance
		rename $fs {}
	}
	return $hash
}
.my .method archivefsdb


proc archivegrypdb {_ fsdbhash keepdbhash} {
	# Back up gryp's own database into the keep, linking it to the fs and
	# keep backups whose hashes are passed in.  Idempotent per gryp epoch.
	# Returns the hash of the stored backup artifact.
	variable grypdbheadermagic
	$_ .vars selfbackupdir
	set bheadermagic [binary format H* $grypdbheadermagic]
	set grypid [$_ .id]
	set grypepoch [$_ dbget {
		select v from system where e = 0 and a = 'epoch'
	}]
	if {[$_ db exists {select 1 from grypbackups where epoch = $grypepoch}]} {
		# this epoch was already backed up; reuse the recorded hash
		set hash [$_ dbget {
			select hash from grypbackups where epoch = $grypepoch
		}]
	} else {
		set backupfile $selfbackupdir/grypbackup
		try {
			$_ db backup main $backupfile
			set hash [$_ storeartifact $backupfile]
			# header: binary magic, NUL, then gryp id plus the encoded epoch
			set header $bheadermagic\0[
				bits struct encode values $grypid [
					bits number encode $grypepoch]]
			# the keep entry records this backup's hash together with the
			# hashes of the companion fs and keep backups
			$_ keep store $header [
				bits struct encode values $hash $fsdbhash $keepdbhash]
			$_ db eval {
				insert into grypbackups values ($grypepoch ,@hash)
			}
		} finally {
			file delete $backupfile
		}
	}
	return $hash
}
.my .method archivegrypdb


proc archivekeepdb _ {
	# Back up the keep's metadata tables (system, repositories, repotype --
	# not the chunk payloads) into the keep itself.  Idempotent per keep
	# epoch.  Returns the hash of the stored backup artifact.
	# (Leftover debug prints -- "whoa", "nerzdum" -- removed.)
	variable keepdbheadermagic
	$_ .vars selfbackupdir

	set keepid [$_ dbget {select v from keep.system where e = 0 and a = 'id'}]
	set keeprowid [$_ dbget {select rowid from keeps where keepid = @keepid}] 
	set keepepoch [$_ dbget {
		select v from keep.system where e = 0 and a = 'epoch'
	}]

	if {[$_ db exists {
		select 1 from keepbackups
		where keepepoch = $keepepoch
	}]} {
		# this epoch was already backed up; reuse the recorded hash
		set hash [$_ db onecolumn {
			select hash from keepbackups where keepepoch = $keepepoch
		}]
	} else {
		set bheadermagic [binary format H* $keepdbheadermagic]

		set keeptmp [$_ .namespace]::[info cmdcount]_keep_tmp
		set keepdir $selfbackupdir/keepbackup
		file mkdir $keepdir
		set backupfile $keepdir/system
		try {
			# create an empty keep database, then discard the instance; only
			# the database file it leaves behind is needed
			[yclprefix] keep keep .new $keeptmp workdir $keepdir
			rename $keeptmp {}

			# copy only the metadata tables into the fresh database
			$_ db transaction {$_ db eval {
				attach $backupfile as keeptmp

				; delete from keeptmp.system
				; delete from keeptmp.repositories
				; delete from keeptmp.repotype

				;insert into keeptmp.system
					select * from keep.system
				;insert into keeptmp.repositories
					select * from keep.repositories
				;insert into keeptmp.repotype
					select * from keep.repotype

			}}

			$_ db eval {
				detach keeptmp
			}

			set hash [$_ storeartifact $backupfile]
			# header: binary magic, NUL, then keep id plus the encoded epoch
			set header $bheadermagic\0[
				bits struct encode values $keepid [
					bits number encode $keepepoch]]
			$_ keep store $header [bits struct encode value $hash]
			$_ db eval {insert or ignore into keepbackups
				values (null ,$keeprowid ,$keepepoch ,@hash)}
		} finally {
			# best-effort cleanup of the staging files, including any
			# sqlite WAL artifacts, then the directory itself
			foreach fname [list \
				$backupfile $keepdir/system-shm $keepdir/system-wal $keepdir
			] {
				if {[file exists $fname]} {
					file delete $fname 
				}
			}
		}
	}
	return $hash
}
.my .method archivekeepdb


proc archiveself _ {
	# Back up gryp's complete state (scan db, keep metadata, gryp db) into
	# the keep.  The 'inselfbackup' flag in the system table guards against
	# a concurrent self-backup.
	$_ .vars selfbackupdir
	$_ db transaction {
		if {[$_ db exists {
			select * from system where e = 0 and a = 'inselfbackup'
		}]} {
			# a self-backup is already in progress
			# NOTE(review): relies on sqlite's Tcl transaction method
			# handling a [return] from the script -- confirm it commits here
			return
		}
		$_ db eval {
			insert into system values (null ,0 ,'inselfbackup' ,1)
		}
	}
	$_ backupdir_rm
	file mkdir $selfbackupdir
	try {
		puts [list do fsbackup]
		set fsdbhash [$_ archivefsdb]
		set fsbackuprowid [$_ dbget {
				select rowid from fsbackups where hash = @fsdbhash
		}]

		puts [list do keepbackup]
		set keepdbhash [$_ archivekeepdb]
		set keepbackuprowid [$_ dbget {
			select rowid from keepbackups where hash = @keepdbhash 
		}]

		puts [list do grypbackup]
		set hash [$_ archivegrypdb $fsdbhash $keepdbhash]
		# grypbackups uses epoch as its integer primary key, so the epoch is
		# the rowid
		set grypbackuprowid [$_ dbget {
			select epoch from grypbackups where hash = @hash 
		}]

		$_ db eval {
			insert or ignore into backups values (
				null ,$fsbackuprowid ,$grypbackuprowid ,$keepbackuprowid)
		}
	} finally {
		# always clear the in-progress flag and the staging directory
		$_ db eval {
			delete from system where e = 0 and a = 'inselfbackup'
		}
		$_ backupdir_rm
	}
puts [list self backup complete]
return
}
.my .method archiveself


proc backupdir_rm _ {
	# Carefully empty the self-backup directory: delete only the file names
	# gryp itself creates (plus their sqlite journals), then remove the
	# directory.  The final delete fails if anything unexpected remains.
	$_ .vars selfbackupdir
	foreach leaf {
		grypbackup
		grypbackup-journal
		keepbackup
		keepbackup-journal
		scansbackup
		scansbackup-journal
	} {
		set target $selfbackupdir/$leaf
		if {[file exists $target]} {
			file delete $target
		}
	}
	file delete $selfbackupdir
}
.my .method backupdir_rm


proc cutfile {_ fname} {
	# Compute the content-defined-chunking cut list and whole-stream
	# signature for $fname.  Returns {signature cuts}.
	set chan [open $fname]
	try {
		chan configure $chan -translation binary
		# NOTE(review): [cut $chan] appears to arm cdc tracking on the
		# channel, the [read] drives it over the file's bytes, and [cuts]/
		# [signature_sha256] collect the results afterwards -- confirm
		# against the ycl string cdc documentation
		cut $chan
		read $chan
		set cuts [cuts $chan]
		set signature [signature_sha256 $chan]
	} finally {
		close $chan
	}
	return [list $signature $cuts]
}
.my .method cutfile


proc dbarchive {_ filename} {
	# Stub: not yet implemented.
}
.my .method dbarchive


proc dbget {_ query} {
	# Run $query through ycl's sqlite "get" helper against this instance's
	# database handle.
	# NOTE(review): tailcall presumably matters so that dbget_ resolves
	# $/@ bind variables in our caller's frame -- confirm against
	# ycl db sqlite util
	tailcall dbget_ [list $_ db] $query
}
.my .method dbget


proc dbsetup _ {
	# Create/open the gryp database in the work directory, validate its
	# magic typeid, and (idempotently) create the schema and seed the
	# system table.
	variable magicb
	$_ .vars workdir
	set dbname [$_ .namespace]::db
	sqlite3 $dbname $workdir/system  
	$_ .eval [list $_ .routine db]

	$_ db transaction {
		if {[$_ db exists {select * from sqlite_master}]} {
			# non-empty database: refuse to touch it unless it carries our
			# magic typeid
			try {
				set dbmagic [$_ db onecolumn {
					select v from system where e = 0 and a = 'typeid'
				}]
			} on error {tres topts} {
				puts stderr [list gryp {error querying database} $tres]
			}
			if {![info exists dbmagic] || $dbmagic ne $magicb} {
				error [list {not a valid gryp workdir}]
			}
		}

		# Index names are global to a sqlite database, so each unique index
		# needs a distinct name.  The original schema reused "c_unique" for
		# backups, fsbackups and keepbackups; with "if not exists" only the
		# first was ever created, leaving fsbackups and keepbackups without
		# their unique constraints.  "c_unique" is kept for backups so that
		# existing databases remain valid.
		$_ db eval {
			create table if not exists backups (
				rowid integer primary key
				, fsbackupid numeric
				, grypbackupid numeric
				, keepbackupid numeric
			)

			; create unique index if not exists c_unique on backups (
				fsbackupid ,grypbackupid ,keepbackupid
			)

			; create table if not exists filesystems (
				rowid integer primary key
				,fsid blob unique
			)

			; create trigger if not exists trigger_filesystems_insert
			insert on filesystems
			begin
				update system set v = v + 1 where e = 0 and a = 'epoch'  
			; end


			; create table if not exists fsbackups (
				rowid integer primary key
				, fsrowid numeric
				, fsepoch numeric
				, hash blob
			)

			; create unique index if not exists c_unique_fsbackups on fsbackups (
				fsrowid ,fsepoch
			)

			; create table if not exists grypbackups (
				epoch integer primary key
				,hash blob
			)

			; create table if not exists keepbackups (
				rowid integer primary key
				, keeprowid numeric
				, keepepoch numeric
				, hash blob
			)

			; create unique index if not exists c_unique_keepbackups on keepbackups (
				keeprowid ,keepepoch
			)

			; create table if not exists keeps (
				rowid integer primary key
				,keepid blob unique
			)

			; create trigger if not exists trigger_keeps_insert
			insert on keeps
			begin
				update system set v = v + 1 where e = 0 and a = 'epoch'  
			; end

			; create table if not exists system (
				rowid integer primary key
				,e
				,a
				,v
			)

			; create table if not exists remotes (
				rowid integer primary key
				, address
				, port
				, constraint c_unique unique (
					address ,port
				)
			)

			; create trigger if not exists trigger_remotes_insert
			insert on remotes
			begin
				update system set v = v + 1 where e = 0 and a = 'epoch'  
			; end

			; create table if not exists track (
				rowid integer primary key
				, path unique
			)
		}


		if {![$_ db exists {select 1 from system where e = 0 and a = 'typeid'}]} {
			# fresh database: record identity and versioning metadata
			set id [$_ randbytes]
			$_ db eval {
				;insert into system values (null ,0 ,'typeid' ,@magicb)
				;insert into system values (null ,0 ,'id' ,@id)
				;insert into system values (null ,0 ,'version' , '0.0.1')
				;insert into system values (null ,0 ,'epoch' , 0)
			}
		}

		#-- ; pragma journal_mode=WAL
		#-- ; pragma main.synchronous=OFF
	}
	return
}
.my .method dbsetup


proc cmdline {_ name args} {
	# Dispatch a command-line subcommand.  Batch subcommands exit the
	# process when they finish; "ui" returns so the event loop can run.
	set batch {archive archiveself keep listen remote repo restore scan track}
	if {$name in $batch} {
		$_ $name {*}$args
		exit 0
	} elseif {$name eq {ui}} {
		$_ $name {*}$args
	} else {
		error [list {unknown command} $name]
	}
}
.my .method cmdline


proc distribute _ {
	# Hand off to the keep's own distribution routine.
	return [$_ keep distribute]
}
.my .method distribute


proc fsdb_register {_ id} {
	# Record a filesystem id, ignoring duplicates.  The filesystems insert
	# trigger (see dbsetup) bumps the epoch when a new filesystem appears.
	$_ db transaction {
		$_ db eval {
			insert or ignore into filesystems values (null ,@id) 
		}
	}
}
.my .method fsdb_register


proc hash {_ data} {
	# SHA-256 digest of $data, returned in binary form.
	# Uses tcllib's dispatching frontend rather than the hard-wired
	# ::sha2::SHA256*-critcl commands, which do not exist when the critcl
	# accelerator is not compiled in; the frontend picks the fastest
	# available implementation automatically.
	return [::sha2::sha256 -bin -- $data]
}
.my .method hash


proc keepsetup _ {
	# Create/open the keep in the work directory, attach its database as
	# "keep", and register its id in the keeps table.
	$_ .vars keepdir
	file mkdir $keepdir
	[yclprefix] keep keep .new [$_ .namespace]::keep workdir $keepdir
	$_ .eval [list $_ .routine keep]
	set keepsystem $keepdir/system
	$_ db eval {
		attach $keepsystem as keep
	}

	set keepid [$_ dbget {select v from keep.system where e = 0 and a = 'id'}]
	# the keeps insert trigger (see dbsetup) bumps the epoch when a
	# genuinely new keep appears
	$_ db eval {
		; insert or ignore into keeps values ( null , @keepid)
	}
	return
}
.my .method keepsetup


proc pkisetup _ {
	# Ensure this instance has an RSA keypair, generating one on first run.
	if {[$_ db exists {select * from system where e = 0 and a = 'pkikey'}]} {
		return
	}
	puts stderr [list generating keypair]
	set pkikey [pki::rsa::generate 2048]
	$_ db eval {
		insert into system values (null ,0 ,'pkikey' ,@pkikey)
	}
	return
}
.my .method pkisetup


proc prune {_ dirname}  {
	# A directory containing the gryp magic marker file is itself a gryp
	# workdir; tell the scanner (see scan_do) not to descend into it.
	variable magic
	return [file exists $dirname/.$magic]
}
.my .method prune


proc listen {_ args} {
	# Accept inbound connections, handing each to [$_ serve], then enter the
	# event loop indefinitely.  Options:
	#     port PORT -- listen port (defaults to the port set in .init)
	$_ .vars port
	#package require tls
	# errors out early if $args is not a well-formed dict (odd length)
	dict size $args
	foreach {opt val} $args {
		switch $opt {
			port {
				set port $val
			}
			default {
				error [list {unknown option} $opt]
			}
		}
	}
	#set chan [tls::socket -server [list $_ serve] $port]
	set chan [socket -server [list $_ serve] $port]
	vwait forever
}
.my .method listen


proc randbytes_ _ {
	# 32 bytes of randomness from ycl's generator.
	return [randbytes 32]
}
.my .method randbytes randbytes_


proc remote_add {_ args} {
	# Register a remote.  Options:
	#     address ADDR -- required
	#     port PORT -- optional; an empty port means "use the default port"
	#         (see remote_push)
	# errors out early if $args is not a well-formed dict (odd length)
	dict size $args
	# empty port falls back to the instance default at push time
	set port {}
	foreach {opt val} $args {
		switch $opt {
			address - port {
				set $opt $val
			}
			default {
				error [list {unknown option} $opt]
			}
		}
	}
	# fixed: a bare [set address] produced a cryptic "can't read" error when
	# the required option was missing
	if {![info exists address]} {
		error [list {missing required option} address]
	}
	$_ db eval {
		insert or ignore into remotes values (null ,@address ,@port)
	}
}
.my .method remote_add


proc remote_list {_ args} {
	# Return the flat {address port address port ...} list of all
	# configured remotes.
	# Fixed: the original "proc remote_list{_ args}" was missing the space
	# after the proc name, defining a proc literally named "remote_list{_",
	# so neither the .method registration below nor the "remote list"
	# ensemble subcommand could find it.
	$_ db eval {select address ,port from remotes}
}
.my .method remote_list


proc remote_push {_ args} {
	#package require tls
	# Contact every configured remote.  A remote with an empty port falls
	# back to the instance's default port.  The payload is currently a
	# placeholder ("howdy").
	# Fixed: the original opened the socket after the query loop, so only
	# whichever remote the last row described was ever contacted.
	$_ .vars port
	$_ db eval {select address ,port as rport from remotes} {
		if {$rport eq {}} {
			set rport $port
		}
		#set chan [tls::socket $address $rport]
		set chan [socket $address $rport]
		puts $chan howdy
		flush $chan
		::close $chan
	}
	return
}
.my .method remote_push


proc repo_add {_ args} {
	# Add one or more keep repositories.  Options (repeatable):
	#     path DIR -- repository directory; normalized to an absolute path
	set paths {}
	while {[llength $args]} {
		set args [lassign $args[set args {}] opt val]
		switch $opt {
			path {
				# the /... trick normalizes without resolving DIR itself
				lappend paths [file dirname [file normalize $val/...]]
			}
			default {
				error [list {unknown option} $opt]
			}
		}
	}
	foreach path $paths {
		puts [list adding repository $path]
		$_ keep repository add sqlite path $path
	}
	return
}


proc repos {_} {
	# Print every repository known to the keep, one stanza per repository.
	set repos [$_ keep repositories]
	# NOTE(review): no explicit loop exit -- presumably the generator
	# returned by [$_ keep repositories] terminates this loop (via break or
	# error) once the repositories are exhausted; confirm against ycl keep
	while 1 {
		set repo [$repos]
		# expose the dict's fields as local variables ($instance, $rowid, ...)
		dict with repo {}
		binary scan $instance H* instancehex
		# drop the raw fields from the dict; the locals remain available
		dict unset repo instance
		dict unset repo rowid
		puts $rowid
		puts $instancehex
		foreach {key val} $repo {
			puts [list $key $val]
		}
		puts {}
	}

}
.my .method repos


proc restore {_ id to} {
	# Restore fileset entry $id -- and, if it is a directory, everything
	# beneath it -- from the keep into $to, which must not already exist.
	# Returns a dict: directories N files N bytes N.
	set restoredfiles 0
	set restoreddirs 0
	set totalbytes 0
	if {[file exists $to]} {
		error [list {already exists} $to]
	}
	set currentpath {}
	$_ db eval {
		select paths.path as path
			, fs.fileset.size as size
			,cuts1.offset as start
			,cuts1.hash as hash
			, coalesce(cuts2.offset, 'end') as next
		from fs.fileset
		join fs.paths on fs.fileset.path = fs.paths.rowid
		left join fs.hashes on fs.fileset.rowid = fs.hashes.fileid
		left join fs.cuts as cuts1 on fs.hashes.streamid = cuts1.streamid
		left join fs.cuts as cuts2 on cuts2.rowid = (
			select rowid from fs.cuts
			where fs.cuts.streamid = cuts1.streamid
			and fs.cuts.offset > cuts1.offset
			limit 1
		)
		where fs.fileset.rowid = $id
		or fs.paths.path like (
			select fs.paths.path from fs.fileset
			join fs.paths on fs.fileset.path = fs.paths.rowid 
			where fs.fileset.rowid = $id 
			limit 1
		) || '%'
	} {
		puts [list restoring $path]
		if {$start eq {}} {
			# no cut rows joined: this row is a directory
			set newdir $to/$path
			if {![file exists $newdir]} {
				file mkdir $newdir
			}
			incr restoreddirs
		} else {
			if {$path ne $currentpath} {
				# Decide the destination BEFORE counting this file: when
				# nothing has been restored yet, $id itself is a plain file
				# and its content goes directly to $to.  The original
				# incremented restoredfiles first, making this condition
				# always true and the single-file branch unreachable.
				if {$restoredfiles || $restoreddirs} {
					if {[info exists chan]} {
						close $chan
					}
					set newpath $to/$path
					noencoding {
						file mkdir [file dirname $newpath]
					}
				} else {
					set newpath $to
					# $id is a file rather than a directory
				}
				incr restoredfiles
				noencoding {
					set chan [open $newpath w]
				}
				chan configure $chan -translation binary
				set currentpath $path
			}
			puts -nonewline $chan [$_ keep retrieve $hash] 
		}
		incr totalbytes $size
	}
	if {[info exists chan]} {
		close $chan
	}
	set res [dict create directories $restoreddirs files $restoredfiles bytes $totalbytes]
	puts [list {restore complete} {*}$res]
	return $res 
}
.my .method restore


proc scan_list {_} {
	# Stub: intended to list recorded scans (reached via the "scan list"
	# ensemble subcommand created in .init); not yet implemented.
}


proc scan_run {_ args} {
	# Scan the given paths -- or every tracked path when none are given --
	# and archive the results, then print a summary.  Options:
	#     path DIR -- repeatable; scan DIR
	#     anything else -- passed through to scan_do
	# Fixed: the explicit-path branch accumulated $archive1 (undefined)
	# instead of $archived1, crashing on the first path; the two duplicated
	# loop bodies are now one.
	$_ .vars fsdir
	set paths {}
	set scanargs {}
	while {[llength $args]} {
		set args [lassign $args[set args {}] opt val]
		switch $opt {
			path {
				lappend paths $val
			}
			default {
				lappend scanargs $opt $val
			}
		}
	}
	if {![llength $paths]} {
		# no explicit paths: fall back to the tracked paths
		set paths [$_ db eval {
			select path from track
		}]
	}
	set archived 0
	set existed 0
	set failed 0
	set openfailed 0
	set readfailed 0
	foreach path $paths {
		set epoch [$_ scan_do $path {*}$scanargs]
		lassign [$_ archive $epoch] archived1 existed1 failed1 \
			openfailed1 readfailed1
		incr archived $archived1
		incr existed $existed1
		incr failed $failed1
		incr openfailed $openfailed1
		incr readfailed $readfailed1
	}
	puts [list {scan complete} {bytes archived} $archived \
		{bytes existing} $existed  \
		{bytes failed} $failed \
		{open failed} $openfailed \
		{read failed} $readfailed
	]
	return
}
.my .method scan_run


proc scan_do {_ path args} {
	# Run a filesystem scan of $path with a temporary scanner instance and
	# return the resulting scan epoch.  Extra args are passed to the
	# scanner's scan routine.
	$_ .vars fsdir
	# [info cmdcount] makes the temporary command name unique
	set name [$_ .namespace]::[info cmdcount]_scan
	[yclprefix] dir scan .spawn $name 
	try {
		$name init workdir $fsdir path $path
		$_ fsdb_register [$name .id]
		# [$_ prune] keeps the scanner out of nested gryp workdirs
		$name scan prune [list $_ prune] {*}$args
		set epoch [$name epoch]
	} finally {
		# fixed: always dispose of the scanner, even if the scan errors;
		# the original leaked the command on failure
		rename $name {}
	}
	return $epoch
}
.my .method scan_do


proc storeartifact {_ fname} {
	# Cut $fname into content-defined chunks, store each chunk in the keep,
	# then store a header artifact describing the stream.  Returns the hash
	# of the header, under which the artifact can later be retrieved.
	variable cutsmagic
	# NOTE(review): the header below embeds the hex $cutsmagic although the
	# binary form $cutsmagicb is computed here and never used; the other
	# archivers embed the binary magic -- confirm which form readers expect
	variable cutsmagicb [binary format H* $cutsmagic]
	lassign [$_ cutfile $fname] signature cuts
	set cutcount [dict size $cuts]
	set header $cutsmagic\0[bits struct encode values $signature [
		bits number encode $cutcount]]
	set start 0
	set chan [open $fname]
	chan configure $chan -translation binary
	set i 0
	set batch {}
	set indices {}
	# fixed: initialize so a file with no cuts at all does not crash on an
	# unset variable below
	set hashes {}
	foreach {last hash} $cuts[set cuts {}] {
		lappend indices $last
		lappend hashes $hash
	}
	try {
		# one round trip to learn which chunks the keep already holds
		set existing [$_ keep existing $hashes]
		foreach last $indices hash $hashes e1 $existing {
			if {$e1} {
				puts [list {already kept} file $fname chunk $i]
			} else {
				seek $chan $start start
				set chunk [read $chan [expr {$last - $start}]]
				if {$chunk eq {}} {
					error [list {empty chunk}]
				}
				# verify against the hash recorded at cut time
				if {[$_ hash $chunk] ne $hash} {
					error [list {hashes don't match} start $start]
				}
				lappend batch $hash $chunk
				if {[llength $batch] > 2048} {
					$_ keep storebatch $batch
					set batch {} 
				}
			}
			#append header [bits struct encode value $hash]
			set start $last
			incr i
		}
		if {[llength $batch]} {
			$_ keep storebatch $batch
			set batch {}
		}
		append header \0
		# fixed: hash the header's CONTENTS; the original [$_ hash header]
		# hashed the literal string "header", keying every artifact stored
		# by this proc under one and the same hash
		set hash [$_ hash $header]
		$_ keep store $hash $header
	} finally {
		close $chan
	}
	return $hash
}
.my .method storeartifact


proc fssetup _ {
	# Attach the filesystem-scan database as "fs", creating it first if it
	# does not exist yet, and register its id.
	$_ .vars fsdb fsdir
	if {![file exists $fsdb]} {
		# spawn a scanner just to initialize the database file, then discard
		set name [$_ .namespace]::[info cmdcount]_scan
		[yclprefix] dir scan .spawn $name 
		$name init workdir $fsdir
		rename $name {}
		#$name init workdir $fsdir path $path
	}
	$_ db eval {
		attach database $fsdb as fs
	}
	set fsdbid [$_ dbget {
		select v from fs.system where e = 0 and a = 'id'
	}]
	$_ fsdb_register $fsdbid 
	return
}
.my .method fssetup


proc serve {_ chan address port args} {
	# Connection handler installed by [listen].  Currently a debugging stub
	# that dumps the handler arguments and the inbound data.
	puts [list hweebo $args]
	puts [list gonk [read $chan]]
}
.my .method serve


proc storebatch {_ batch} {
	# Stub: not yet implemented.
}
.my .method storebatch


proc track {_ args} {
	# Add paths to the set of tracked paths that scan_run scans by default.
	# Options (repeatable):
	#     path DIR
	# fixed: initialize so calling with no args is a no-op instead of an
	# unset-variable error (consistent with repo_add)
	set paths {}
	while {[llength $args]} {
		set args [lassign $args[set args {}] opt val]
		switch $opt {
			path {
				lappend paths $val
			}
			default {
				# fixed: error message typo ("uknown")
				error [list {unknown option} $opt]
			}
		}
	}
	foreach path $paths {
		# the /... trick normalizes without resolving DIR itself
		set path [file dirname [file normalize $path/...]]
		$_ db eval {
			insert or ignore into track values (null ,$path)
		}
	}
	return
}
.my .method track


proc ui {_ args} {
	# Launch the Tk-based user interface, connected to this gryp instance.
	# The packages are required lazily so batch use never needs Tk.
	package require {ycl gryp ui}
	package require Tk
	[[yclprefix] gryp ui .new ui] .init top {} {*}$args gryp $_
}
.my .method ui


proc workdir {_ args} {
	# With one argument: set and initialize the work directory (database,
	# keys, keep, filesystem db).  With no arguments: query it.
	# Returns the work directory, or {} if unset.
	variable magic
	$_ .vars keepdir fsdb fsdir selfbackupdir workdir

	if {[llength $args] == 1} {
		if {[info exists workdir]} {
			error [list {work directory already set}]
		}
		# the /... trick normalizes without resolving the directory itself
		set workdir [file dirname [file normalize [lindex $args 0]/...]]

		# to do
		#     make this atomic
		file mkdir $workdir
		set magicfile $workdir/.$magic
		if {![file exists $magicfile]} {
			# only an empty directory may be claimed as a new project
			set glob [glob -nocomplain -directory $workdir *]
			if {[llength $glob]} {
				error [list {not a gryp project directory}]
			}
			# EXCL CREAT: fails if another process created the marker since
			# the check above
			set magicchan [open $magicfile {EXCL CREAT WRONLY}]
			# fixed: close right away -- only the file's existence matters,
			# and the original leaked the channel if the re-check below
			# raised an error
			close $magicchan
			# re-check for a race with another process populating the
			# directory between the first glob and the marker creation
			set glob [glob -nocomplain -directory $workdir *]
			if {[llength $glob]} {
				error [list {gryp project directory already contains files}]
			}
		}

		set keepdir $workdir/keep
		set fsdir $workdir/scans
		set selfbackupdir $workdir/selfbackup
		set fsdb $fsdir/system
		$_ dbsetup
		$_ pkisetup
		$_ keepsetup
		$_ fssetup
	} elseif {[llength $args]} {
		error [list {wrong # args}]
	}
	if {[info exists workdir]} {
		return $workdir
	} else {
		return {}
	}
}
.my .method workdir


# Magic marker identifying a gryp workdir (also the name of the dotfile
# created in the workdir) and its binary form, stored as the 'typeid' row.
variable magic e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
variable magicb [binary format H* $magic]

# Header magic constants for on-disk structures.  The only place the binary
# form of these values should ever be written to disk is at the beginning of
# the header for a list of cuts or for the respective database backups.
variable scandbheadermagic 669d31589b710168388baf7924c66f9af928c63df0ecbe40270d2a2316c1d7d1
variable keepdbheadermagic 5a647ebecdaa96c3861c598de5a2d2df6c3b44cb5605872b2da837df69d1ff79
variable grypdbheadermagic 27fad3c8b860db01d42967339936d4c6228549cf4576e998fdb1d472b260615e
variable grypsetheadermagic 73bada9f70ea330b31c5e6d482e966b7cfbb3bafeacf9822c80b20d787280e5c
variable cutsmagic cf8ffd1e022974d166769272bb9eff3df68e58b752065cd1b152386324a19dfb