/*
** Copyright (c) 2010 D. Richard Hipp
**
** This program is free software; you can redistribute it and/or
** modify it under the terms of the Simplified BSD License (also
** known as the "2-Clause License" or "FreeBSD License".)
**
** This program is distributed in the hope that it will be useful,
** but without any warranty; without even the implied warranty of
** merchantability or fitness for a particular purpose.
**
** Author contact information:
**   drh@sqlite.org
**
*******************************************************************************
**
** This file contains code used to import the content of a Git
** repository in the git-fast-import format as a new Fossil
** repository.
*/
#include "config.h"
#include "import.h"
/* NOTE(review): the header name was missing from this #include directive in
** the text as received; <assert.h> restored — confirm against upstream. */
#include <assert.h>

#if INTERFACE
/*
** A single file change record.  One of these exists for each file
** touched by the commit currently being assembled.
*/
struct ImportFile {
  char *zName;           /* Name of a file */
  char *zUuid;           /* UUID of the file */
  char *zPrior;          /* Prior name if the name was changed */
  char isFrom;           /* True if obtained from the parent */
  char isExe;            /* True if executable */
  char isLink;           /* True if symlink */
};
#endif


/*
** State information about an on-going fast-import parse.
*/
static struct {
  void (*xFinish)(void);      /* Function to finish a prior record */
  int nData;                  /* Bytes of data */
  char *zTag;                 /* Name of a tag */
  char *zBranch;              /* Name of a branch for a commit */
  char *zPrevBranch;          /* The branch of the previous check-in */
  char *aData;                /* Data content */
  char *zMark;                /* The current mark */
  char *zDate;                /* Date/time stamp */
  char *zUser;                /* User name */
  char *zComment;             /* Comment of a commit */
  char *zFrom;                /* from value as a UUID */
  char *zPrevCheckin;         /* Name of the previous check-in */
  char *zFromMark;            /* The mark of the "from" field */
  int nMerge;                 /* Number of merge values */
  int nMergeAlloc;            /* Number of slots in azMerge[] */
  char **azMerge;             /* Merge values */
  int nFile;                  /* Number of aFile values */
  int nFileAlloc;             /* Number of slots in aFile[] */
  ImportFile *aFile;          /* Information about files in a commit */
  int fromLoaded;             /* True if zFrom content loaded into aFile[] */
  int hasLinks;               /* True if git repository contains symlinks */
  int tagCommit;              /* True if the commit adds a tag */
} gg;

/*
** Duplicate a string.
**
** Returns NULL when zOrig is NULL.  The caller owns the returned buffer
** and must eventually release it with fossil_free().
*/
char *fossil_strdup(const char *zOrig){
  char *z = 0;
  if( zOrig ){
    int n = strlen(zOrig);
    z = fossil_malloc( n+1 );
    memcpy(z, zOrig, n+1);
  }
  return z;
}

/*
** A no-op "xFinish" method
*/
static void finish_noop(void){}

/*
** Deallocate the state information.
**
** The azMerge[] and aFile[] arrays are zeroed but allocated space is
** retained unless the freeAll flag is set.
*/
static void import_reset(int freeAll){
  int i;
  gg.xFinish = 0;
  fossil_free(gg.zTag); gg.zTag = 0;
  fossil_free(gg.zBranch); gg.zBranch = 0;
  fossil_free(gg.aData); gg.aData = 0;
  fossil_free(gg.zMark); gg.zMark = 0;
  fossil_free(gg.zDate); gg.zDate = 0;
  fossil_free(gg.zUser); gg.zUser = 0;
  fossil_free(gg.zComment); gg.zComment = 0;
  fossil_free(gg.zFrom); gg.zFrom = 0;
  fossil_free(gg.zFromMark); gg.zFromMark = 0;
  /* NOTE(review): the source text is corrupted from here through the end of
  ** mfile_cmp() below — everything between a '<' and the next '>' was
  ** stripped in transit, taking with it the body of this loop and,
  ** apparently, several entire functions (the blob/tag record finishers and
  ** the head of mfile_cmp()).  The surviving fragment is preserved verbatim;
  ** restore the lost code from the upstream Fossil sources rather than
  ** guessing at it. */
  for(i=0; izName, pB->zName); }

/*
** Compare two strings for sorting.
*/
static int string_cmp(const void *pLeft, const void *pRight){
  const char *zLeft = *(const char **)pLeft;
  const char *zRight = *(const char **)pRight;
  return fossil_strcmp(zLeft, zRight);
}

/* Forward reference */
static void import_prior_files(void);

/*
** Use data accumulated in gg from a "commit" record to add a new
** manifest artifact to the BLOB table.
*/
static void finish_commit(void){
  int i;
  char *zFromBranch;
  char *aTCard[4];            /* Array of T cards for manifest */
  int nTCard = 0;             /* Entries used in aTCard[] */
  Blob record, cksum;

  import_prior_files();
  qsort(gg.aFile, gg.nFile, sizeof(gg.aFile[0]), mfile_cmp);
  blob_zero(&record);
  blob_appendf(&record, "C %F\n", gg.zComment);
  blob_appendf(&record, "D %s\n", gg.zDate);
  /* NOTE(review): corrupted span — the remainder of this function (F/P/T/U/Z
  ** card generation and the commit insertion) and the head of
  ** import_add_file() are missing from the source as received (text between
  ** a '<' and the next '>' was stripped).  The surviving fragment follows
  ** verbatim; recover the lost code from upstream Fossil. */
  for(i=0; i=gg.nFileAlloc ){
    /* Grow the aFile[] array geometrically as needed. */
    gg.nFileAlloc = gg.nFileAlloc*2 + 100;
    gg.aFile = fossil_realloc(gg.aFile, gg.nFileAlloc*sizeof(gg.aFile[0]));
  }
  pFile = &gg.aFile[gg.nFile++];
  memset(pFile, 0, sizeof(*pFile));
  return pFile;
}

/*
** Load all file information out of the gg.zFrom check-in
*/
static void import_prior_files(void){
  Manifest *p;
  int rid;
  ManifestFile *pOld;
  ImportFile *pNew;
  if( gg.fromLoaded ) return;
  gg.fromLoaded = 1;
  /* If no explicit "from" was given but the previous check-in is on the
  ** same branch, treat the previous check-in as the parent. */
  if( gg.zFrom==0 && gg.zPrevCheckin!=0
   && fossil_strcmp(gg.zBranch, gg.zPrevBranch)==0
  ){
    gg.zFrom = gg.zPrevCheckin;
    gg.zPrevCheckin = 0;
  }
  if( gg.zFrom==0 ) return;
  rid = fast_uuid_to_rid(gg.zFrom);
  if( rid==0 ) return;
  p = manifest_get(rid, CFTYPE_MANIFEST, 0);
  if( p==0 ) return;
  /* Copy every file of the parent manifest into aFile[], marked isFrom. */
  manifest_file_rewind(p);
  while( (pOld = manifest_file_next(p, 0))!=0 ){
    pNew = import_add_file();
    pNew->zName = fossil_strdup(pOld->zName);
    pNew->isExe = pOld->zPerm && strstr(pOld->zPerm, "x")!=0;
    pNew->isLink = pOld->zPerm && strstr(pOld->zPerm, "l")!=0;
    pNew->zUuid = fossil_strdup(pOld->zUuid);
    pNew->isFrom = 1;
  }
  manifest_destroy(p);
}

/*
** Locate a file in the gg.aFile[] array by its name.  Begin the search
** with the *pI-th file.  Update *pI to be one past the file found.
** Do not search past the mx-th file.
*/
static ImportFile *import_find_file(const char *zName, int *pI, int mx){
  int i = *pI;
  int nName = strlen(zName);
  /* NOTE(review): corrupted span — the body of this search loop, the helper
  ** routines that followed it (filename dequoting, line tokenizing,
  ** committish resolution), and the head of git_fast_import() are missing
  ** from the source as received (text between a '<' and the next '>' was
  ** stripped).  The surviving fragment resumes, verbatim, inside the
  ** "commit" branch of git_fast_import(); recover the lost code from the
  ** upstream Fossil sources. */
  while( i=0 && z[i]!='/'; i--){}
      /* NOTE(review): if the scan above can terminate with i<4 then
      ** &z[i-4] reads before the start of the buffer — verify bounds. */
      gg.tagCommit = strncmp(&z[i-4], "tags", 4)==0;  /* True for pattern B */
      if( z[i+1]!=0 ) z += i+1;
      if( fossil_strcmp(z, "master")==0 ) z = "trunk";
      gg.zBranch = fossil_strdup(z);
      gg.fromLoaded = 0;
    }else if( strncmp(zLine, "tag ", 4)==0 ){
      gg.xFinish();
      gg.xFinish = finish_tag;
      trim_newline(&zLine[4]);
      gg.zTag = fossil_strdup(&zLine[4]);
    }else if( strncmp(zLine, "reset ", 4)==0 ){
      /* NOTE(review): the prefix length should be 6 ("reset "), not 4 —
      ** as written this also matches any line beginning with "rese". */
      gg.xFinish();
    }else if( strncmp(zLine, "checkpoint", 10)==0 ){
      gg.xFinish();
    }else if( strncmp(zLine, "feature", 7)==0 ){
      gg.xFinish();
    }else if( strncmp(zLine, "option", 6)==0 ){
      gg.xFinish();
    }else if( strncmp(zLine, "progress ", 9)==0 ){
      gg.xFinish();
      trim_newline(&zLine[9]);
      fossil_print("%s\n", &zLine[9]);
      fflush(stdout);
    }else if( strncmp(zLine, "data ", 5)==0 ){
      fossil_free(gg.aData); gg.aData = 0;
      gg.nData = atoi(&zLine[5]);
      if( gg.nData ){
        int got;
        gg.aData = fossil_malloc( gg.nData+1 );
        got = fread(gg.aData, 1, gg.nData, pIn);
        if( got!=gg.nData ){
          fossil_fatal("short read: got %d of %d bytes", got, gg.nData);
        }
        gg.aData[got] = 0;
        /* The first data block seen while assembling a commit is the
        ** commit's log message. */
        if( gg.zComment==0 && gg.xFinish==finish_commit ){
          gg.zComment = gg.aData;
          gg.aData = 0;
          gg.nData = 0;
        }
      }
    }else if( strncmp(zLine, "author ", 7)==0 ){
      /* No-op */
    }else if( strncmp(zLine, "mark ", 5)==0 ){
      trim_newline(&zLine[5]);
      fossil_free(gg.zMark);
      gg.zMark = fossil_strdup(&zLine[5]);
    }else if( strncmp(zLine, "tagger ", 7)==0
           || strncmp(zLine, "committer ",10)==0 ){
      sqlite3_int64 secSince1970;
      /* The user name is the e-mail address between '<' and '>'. */
      for(i=0; zLine[i] && zLine[i]!='<'; i++){}
      if( zLine[i]==0 ) goto malformed_line;
      z = &zLine[i+1];
      for(i=i+1; zLine[i] && zLine[i]!='>'; i++){}
      if( zLine[i]==0 ) goto malformed_line;
      zLine[i] = 0;
      fossil_free(gg.zUser);
      gg.zUser = fossil_strdup(z);
      /* Parse the decimal seconds-since-1970 timestamp that follows. */
      secSince1970 = 0;
      for(i=i+2; fossil_isdigit(zLine[i]); i++){
        secSince1970 = secSince1970*10 + zLine[i] - '0';
      }
      fossil_free(gg.zDate);
      gg.zDate = db_text(0, "SELECT datetime(%lld, 'unixepoch')", secSince1970);
      gg.zDate[10] = 'T';  /* "YYYY-MM-DD HH:MM:SS" -> ISO8601 'T' form */
    }else if( strncmp(zLine, "from ", 5)==0 ){
      trim_newline(&zLine[5]);
      fossil_free(gg.zFromMark);
      gg.zFromMark = fossil_strdup(&zLine[5]);
      fossil_free(gg.zFrom);
      gg.zFrom = resolve_committish(&zLine[5]);
    }else if( strncmp(zLine, "merge ", 6)==0 ){
      trim_newline(&zLine[6]);
      if( gg.nMerge>=gg.nMergeAlloc ){
        gg.nMergeAlloc = gg.nMergeAlloc*2 + 10;
        gg.azMerge = fossil_realloc(gg.azMerge, gg.nMergeAlloc*sizeof(char*));
      }
      gg.azMerge[gg.nMerge] = resolve_committish(&zLine[6]);
      if( gg.azMerge[gg.nMerge] ) gg.nMerge++;
    }else if( strncmp(zLine, "M ", 2)==0 ){
      /* File modify/add record: "M <perm> <dataref> <path>" */
      import_prior_files();
      z = &zLine[2];
      zPerm = next_token(&z);
      zUuid = next_token(&z);
      zName = rest_of_line(&z);
      dequote_git_filename(zName);
      i = 0;
      pFile = import_find_file(zName, &i, gg.nFile);
      if( pFile==0 ){
        pFile = import_add_file();
        pFile->zName = fossil_strdup(zName);
      }
      pFile->isExe = (fossil_strcmp(zPerm, "100755")==0);
      pFile->isLink = (fossil_strcmp(zPerm, "120000")==0);
      fossil_free(pFile->zUuid);
      pFile->zUuid = resolve_committish(zUuid);
      pFile->isFrom = 0;
    }else if( strncmp(zLine, "D ", 2)==0 ){
      /* File delete record: remove inherited entries matching the name. */
      import_prior_files();
      z = &zLine[2];
      zName = rest_of_line(&z);
      dequote_git_filename(zName);
      i = 0;
      while( (pFile = import_find_file(zName, &i, gg.nFile))!=0 ){
        if( pFile->isFrom==0 ) continue;
        fossil_free(pFile->zName);
        fossil_free(pFile->zPrior);
        fossil_free(pFile->zUuid);
        *pFile = gg.aFile[--gg.nFile];
        i--;
      }
    }else if( strncmp(zLine, "C ", 2)==0 ){
      /* File copy record. */
      int nFrom;
      import_prior_files();
      z = &zLine[2];
      zFrom = next_token(&z);
      zTo = rest_of_line(&z);
      i = 0;
      mx = gg.nFile;
      nFrom = strlen(zFrom);
      while( (pFile = import_find_file(zFrom, &i, mx))!=0 ){
        if( pFile->isFrom==0 ) continue;
        pNew = import_add_file();
        pFile = &gg.aFile[i-1];  /* import_add_file() may realloc gg.aFile */
        if( strlen(pFile->zName)>nFrom ){
          /* NOTE(review): this passes a char where %s expects a pointer;
          ** it looks like a lost '&' — likely &pFile->zName[nFrom]. */
          pNew->zName = mprintf("%s%s", zTo, pFile->zName[nFrom]);
        }else{
          pNew->zName = fossil_strdup(pFile->zName);
        }
        pNew->isExe = pFile->isExe;
        pNew->isLink = pFile->isLink;
        pNew->zUuid = fossil_strdup(pFile->zUuid);
        pNew->isFrom = 0;
      }
    }else if( strncmp(zLine, "R ", 2)==0 ){
      /* File rename record. */
      int nFrom;
      import_prior_files();
      z = &zLine[2];
      zFrom = next_token(&z);
      zTo = rest_of_line(&z);
      i = 0;
      nFrom = strlen(zFrom);
      while( (pFile = import_find_file(zFrom, &i, gg.nFile))!=0 ){
        if( pFile->isFrom==0 ) continue;
        pNew = import_add_file();
        pFile = &gg.aFile[i-1];
        if( strlen(pFile->zName)>nFrom ){
          /* NOTE(review): same suspected lost '&' as in the "C" branch. */
          pNew->zName = mprintf("%s%s", zTo, pFile->zName[nFrom]);
        }else{
          pNew->zName = fossil_strdup(pFile->zName);
        }
        pNew->zPrior = pFile->zName;
        pNew->isExe = pFile->isExe;
        pNew->isLink = pFile->isLink;
        pNew->zUuid = pFile->zUuid;
        pNew->isFrom = 0;
        gg.nFile--;
        *pFile = *pNew;
        memset(pNew, 0, sizeof(*pNew));
      }
      /* NOTE(review): this fatal error is unconditional, which makes the
      ** rename loop above dead code — R records are rejected outright. */
      fossil_fatal("cannot handle R records, use --full-tree");
    }else if( strncmp(zLine, "deleteall", 9)==0 ){
      gg.fromLoaded = 1;
    }else if( strncmp(zLine, "N ", 2)==0 ){
      /* No-op */
    }else {
      goto malformed_line;
    }
  }
  gg.xFinish();
  if( gg.hasLinks ){
    db_set_int("allow-symlinks", 1, 0);
  }
  import_reset(1);
  return;

malformed_line:
  trim_newline(zLine);
  fossil_fatal("bad fast-import line: [%s]", zLine);
  return;
}

/*
** A single key/value pair from an svn-dump record.
*/
typedef struct {
  const char *zKey;     /* Header or property name */
  const char *zVal;     /* Associated value */
} KeyVal;

/*
** One parsed record of an svn-dump stream.
*/
typedef struct {
  KeyVal *aHeaders;     /* Record headers */
  int nHeaders;         /* Number of headers */
  char *pRawProps;      /* Raw property block (backing store for aProps) */
  KeyVal *aProps;       /* Parsed properties (point into pRawProps) */
  int nProps;           /* Number of properties */
  Blob content;         /* Text content of the record */
  int nContent;         /* Bytes of content */
} SvnRecord;

#define svn_find_header(rec, zHeader) \
  svn_find_keyval((rec).aHeaders, (rec).nHeaders, (zHeader))
#define svn_find_prop(rec, zProp) \
  svn_find_keyval((rec).aProps, (rec).nProps, (zProp))

/*
** Look up zKey in an array of key/value pairs; return its value or 0.
*/
static const char *svn_find_keyval(
  KeyVal *aKeyVal,
  int nKeyVal,
  const char *zKey
){
  int i;
  /* NOTE(review): corrupted span — the body of this lookup loop and the
  ** declaration of svn_free_rec() (whose body the fragment below belongs
  ** to) are missing from the source as received; preserved verbatim. */
  for(i=0; inHeaders; i++){
    /* NOTE(review): zKey is declared const yet owns heap memory from
    ** mprintf(); freeing through a const pointer draws a warning. */
    fossil_free(rec->aHeaders[i].zKey);
  }
  fossil_free(rec->aHeaders);
  fossil_free(rec->aProps);
  fossil_free(rec->pRawProps);
  blob_reset(&rec->content);
}

/*
** Read the header lines of the next svn-dump record into rec->aHeaders.
** Returns 0 at end of input, 1 otherwise.
*/
static int svn_read_headers(FILE *pIn, SvnRecord *rec){
  char zLine[1000];
  rec->aHeaders = 0;
  rec->nHeaders = 0;
  /* Skip the blank lines that separate records. */
  while( fgets(zLine, sizeof(zLine), pIn) ){
    if( zLine[0]!='\n' ) break;
  }
  if( feof(pIn) ) return 0;
  do{
    char *sep;
    if( zLine[0]=='\n'
    ) break;
    rec->nHeaders += 1;
    rec->aHeaders = fossil_realloc(rec->aHeaders,
                                   sizeof(rec->aHeaders[0])*rec->nHeaders);
    /* Copy the whole line; split it into key and value at the ':'. */
    rec->aHeaders[rec->nHeaders-1].zKey = mprintf("%s", zLine);
    sep = strchr(rec->aHeaders[rec->nHeaders-1].zKey, ':');
    if( !sep ){
      trim_newline(zLine);
      fossil_fatal("bad header line: [%s]", zLine);
    }
    *sep = 0;
    rec->aHeaders[rec->nHeaders-1].zVal = sep+1;
    /* NOTE(review): if the line was exactly at the buffer limit there may be
    ** no '\n' and this strchr() returns NULL, making *sep=0 crash. */
    sep = strchr(rec->aHeaders[rec->nHeaders-1].zVal, '\n');
    *sep = 0;
    /* Skip leading whitespace of the value. */
    while(rec->aHeaders[rec->nHeaders-1].zVal
       && fossil_isspace(*(rec->aHeaders[rec->nHeaders-1].zVal)) ) {
      rec->aHeaders[rec->nHeaders-1].zVal++;
    }
  }while( fgets(zLine, sizeof(zLine), pIn) );
  if( zLine[0]!='\n' ){
    trim_newline(zLine);
    fossil_fatal("svn-dump data ended unexpectedly");
  }
  return 1;
}

/*
** Read and parse the property section (if any) of the current record.
** Properties are stored as K/V length-prefixed pairs terminated by a
** "PROPS-END" line; the parsed pairs point into rec->pRawProps.
*/
static void svn_read_props(FILE *pIn, SvnRecord *rec){
  int nRawProps = 0;
  char *pRawProps;
  const char *zLen;
  rec->pRawProps = 0;
  rec->aProps = 0;
  rec->nProps = 0;
  zLen = svn_find_header(*rec, "Prop-content-length");
  if( zLen ){
    nRawProps = atoi(zLen);
  }
  if( nRawProps ){
    int got;
    char *zLine;
    rec->pRawProps = pRawProps = fossil_malloc( nRawProps );
    got = fread(rec->pRawProps, 1, nRawProps, pIn);
    if( got!=nRawProps ){
      fossil_fatal("short read: got %d of %d bytes", got, nRawProps);
    }
    if( memcmp(&pRawProps[got-10], "PROPS-END\n", 10)!=0 ){
      fossil_fatal("svn-dump data ended unexpectedly");
    }
    zLine = pRawProps;
    while( zLine<(pRawProps+nRawProps-10) ){
      char *eol;
      int propLen;
      /* "K <len>" line followed by the key text. */
      if( zLine[0]!='K' ){
        fossil_fatal("svn-dump data format broken");
      }
      propLen = atoi(&zLine[2]);
      eol = strchr(zLine, '\n');
      zLine = eol+1;
      eol = zLine+propLen;
      if( *eol!='\n' ){
        fossil_fatal("svn-dump data format broken");
      }
      *eol = 0;
      rec->nProps += 1;
      rec->aProps = fossil_realloc(rec->aProps,
                                   sizeof(rec->aProps[0])*rec->nProps);
      rec->aProps[rec->nProps-1].zKey = zLine;
      zLine = eol+1;
      /* "V <len>" line followed by the value text. */
      if( zLine[0]!='V' ){
        fossil_fatal("svn-dump data format broken");
      }
      propLen = atoi(&zLine[2]);
      eol = strchr(zLine, '\n');
      zLine = eol+1;
      eol = zLine+propLen;
      if( *eol!='\n' ){
        fossil_fatal("svn-dump data format broken");
      }
      *eol = 0;
      rec->aProps[rec->nProps-1].zVal = zLine;
      zLine = eol+1;
    }
  }
}

/*
** Read one complete svn-dump record: headers, properties, then text
** content.  Returns 0 at end of input, 1 on success.
*/
static int svn_read_rec(FILE *pIn, SvnRecord *rec){
  const char *zLen;
  int nLen = 0;
  if( svn_read_headers(pIn, rec)==0 ) return 0;
  svn_read_props(pIn, rec);
  blob_zero(&rec->content);
  zLen = svn_find_header(*rec, "Text-content-length");
  if( zLen ){
    nLen = atoi(zLen);
  }
  /* NOTE(review): nLen>=0 is always true (nLen starts at 0), so this reads
  ** a zero-length chunk even for records with no text content — presumably
  ** nLen>0 was intended. */
  if( nLen>=0 ){
    blob_read_from_channel(&rec->content, pIn, nLen);
    if( blob_size(&rec->content)!=nLen ){
      fossil_fatal("short read: got %d of %d bytes",
                   blob_size(&rec->content), nLen
      );
    }
  }
  return 1;
}

/*
** Synthesize one Fossil manifest artifact for each revision accumulated
** in the xrevisions/xfiles temp tables, chaining each to its predecessor
** via a P card.
*/
static void svn_create_manifests(){
  Blob manifest;
  Stmt qRev;
  Stmt qFiles;
  int parentRid = 0;
  blob_zero(&manifest);
  db_prepare(&qRev, "SELECT trev, tuser, tmsg, ttime FROM xrevisions"
                    " ORDER BY trev");
  db_prepare(&qFiles, "SELECT tpath, uuid, tperm"
                      " FROM xfiles JOIN blob ON xfiles.trid=blob.rid"
                      " WHERE trev=:rev ORDER BY tpath");
  while( db_step(&qRev)==SQLITE_ROW ){
    int rev = db_column_int(&qRev, 0);
    const char *zUser = db_column_text(&qRev, 1);
    const char *zMsg = db_column_text(&qRev, 2);
    const char *zTime = db_column_text(&qRev, 3);
    Blob mcksum;
    blob_reset(&manifest);
    if( zMsg ){
      blob_appendf(&manifest, "C %F\n", zMsg);
    }else{
      blob_append(&manifest, "C (no\\scomment)\n", 16);
    }
    blob_appendf(&manifest, "D %s\n", zTime);
    db_bind_int(&qFiles, ":rev", rev);
    while( db_step(&qFiles)==SQLITE_ROW ){
      const char *zFile = db_column_text(&qFiles, 0);
      const char *zUuid = db_column_text(&qFiles, 1);
      const char *zPerm = db_column_text(&qFiles, 2);
      blob_appendf(&manifest, "F %F %s %s\n", zFile, zUuid, zPerm);
    }
    db_reset(&qFiles);
    if( parentRid>0 ){
      const char *zParent;
      zParent = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", parentRid);
      blob_appendf(&manifest, "P %s\n", zParent);
      /* NOTE(review): zParent is declared const yet owns heap memory from
      ** db_text(); freeing through a const pointer draws a warning. */
      fossil_free(zParent);
    }else{
      /* First revision: empty R card plus branch/tag setup for trunk. */
      blob_appendf(&manifest, "R d41d8cd98f00b204e9800998ecf8427e\n");
      blob_appendf(&manifest, "T *branch * trunk\n");
      blob_appendf(&manifest, "T *sym-trunk *\n");
    }
    if( zUser ){
      blob_appendf(&manifest, "U %F\n", zUser);
    }else{
      /* No author recorded: fall back to --user-override or login name. */
      const char *zUserOvrd = find_option("user-override",0,1);
      blob_appendf(&manifest, "U %F\n", zUserOvrd ? zUserOvrd : login_name());
    }
    md5sum_blob(&manifest, &mcksum);
    blob_appendf(&manifest, "Z %b\n", &mcksum);
    blob_reset(&mcksum);
    parentRid = content_put(&manifest);
  }
  db_finalize(&qRev);
  db_finalize(&qFiles);
}

/*
** Read the svn-dump format from pIn and insert the corresponding
** content into the database.
*/
static void svn_dump_import(FILE *pIn){
  SvnRecord rec;
  int ver;
  const char *zTemp;
  const char *zUuid;
  Stmt insRev;
  Stmt insFile;
  int rev = 0;

  /* version */
  if( svn_read_rec(pIn, &rec)
   && (zTemp = svn_find_header(rec, "SVN-fs-dump-format-version")) ){
    ver = atoi(zTemp);
    if( ver!=2 ){
      fossil_fatal("Unknown svn-dump format version: %d", ver);
    }
  }else{
    fossil_fatal("Input is not an svn-dump!");
  }
  svn_free_rec(&rec);

  /* UUID */
  if( !svn_read_rec(pIn, &rec) || !(zUuid = svn_find_header(rec, "UUID")) ){
    fossil_fatal("Missing UUID!");
  }
  svn_free_rec(&rec);

  /* content */
  db_prepare(&insRev,
    "INSERT INTO xrevisions (trev, tuser, tmsg, ttime)"
    "VALUES(:rev, :user, :msg, :time)"
  );
  db_prepare(&insFile,
    "INSERT INTO xfiles (trev, tpath, trid, tperm)"
    "VALUES(:rev, :path, :rid, :perm)"
  );
  while( svn_read_rec(pIn, &rec) ){
    /* NOTE(review): assignment used directly as a condition here and below;
    ** conventionally written with extra parentheses: ( (zTemp = ...)!=0 ). */
    if( zTemp = svn_find_header(rec, "Revision-number") ){
      /* Revision record: capture author/log/date into xrevisions. */
      const char *zUser = svn_find_prop(rec, "svn:author");
      const char *zLog = svn_find_prop(rec, "svn:log");
      const char *zDate = svn_find_prop(rec, "svn:date");
      zDate = date_in_standard_format(zDate);
      rev = atoi(zTemp);
      db_bind_int(&insRev, ":rev", rev);
      db_bind_text(&insRev, ":user", zUser);
      db_bind_text(&insRev, ":msg", zLog);
      db_bind_text(&insRev, ":time", zDate);
      db_step(&insRev);
      db_reset(&insRev);
      fossil_free(zDate);
    }else if( zTemp = svn_find_header(rec, "Node-path") ){
      /* Node record: one file (or directory) change within a revision. */
      const char *zPath = zTemp;
      const char *zAction = svn_find_header(rec, "Node-action");
      const char *zKind = svn_find_header(rec, "Node-kind");
      const char *zSrcPath = svn_find_header(rec, "Node-copyfrom-path");
      const char *zPerm = svn_find_prop(rec, "svn:executable") ? "x" : 0;
      int srcRev = -1;
      int rid = 0;
      if( zKind && strncmp(zKind, "dir", 3)==0 ){
        svn_free_rec(&rec);
        continue;
      }
      zTemp = svn_find_header(rec, "Node-copyfrom-rev");
      if( zTemp ){
        srcRev = atoi(zTemp);
      }
      rid = content_put(&rec.content);
      if( strncmp(zAction, "add", 3)==0 ){
        db_bind_int(&insFile, ":rev", rev);
        db_bind_int(&insFile, ":rid", rid);
        db_bind_text(&insFile, ":path", zPath);
        db_bind_text(&insFile, ":perm", zPerm);
        db_step(&insFile);
        db_reset(&insFile);
      }else if( strncmp(zAction, "change", 6)==0 ){
        db_bind_int(&insFile, ":rev", rev);
        db_bind_int(&insFile, ":rid", rid);
        db_bind_text(&insFile, ":path", zPath);
        db_bind_text(&insFile, ":perm", zPerm);
        db_step(&insFile);
        db_reset(&insFile);
      }else if( strncmp(zAction, "delete", 6)==0 ){
        /* NOTE(review): delete and replace actions, and the copy-from
        ** history collected in zSrcPath/srcRev, are currently ignored. */
      }else if( strncmp(zAction, "replace", 7)==0 ){
      }else{
      }
    }else{
      fossil_fatal("Unknown record type");
    }
    svn_free_rec(&rec);
  }
  svn_create_manifests();
  db_finalize(&insRev);
  db_finalize(&insFile);
}

/*
** COMMAND: import
**
** Usage: %fossil import FORMAT ?OPTIONS? NEW-REPOSITORY ?INPUT-FILE?
**
** Read interchange format generated by another VCS and use it to
** construct a new Fossil repository named by the NEW-REPOSITORY
** argument.  If no input file is supplied the interchange format
** data is read from standard input.
**
** The following formats are currently understood by this command
**
**   --git        Import from the git-fast-export file format
**   --svn        Import from the svnadmin-dump file format
**
** The --incremental option allows an existing repository to be extended
** with new content.
**
** Options:
**
**   --incremental      allow importing into an existing repository
**
** See also: export
*/
void git_import_cmd(void){
  char *zPassword;
  FILE *pIn;
  Stmt q;
  /* NOTE(review): -f/--force is accepted here but is not documented in the
  ** help text above. */
  int forceFlag = find_option("force", "f", 0)!=0;
  int incrFlag = find_option("incremental", "i", 0)!=0;
  int gitFlag = find_option("git",0,0)!=0;
  int svnFlag = find_option("svn",0,0)!=0;
  verify_all_options();
  if( g.argc!=3 && g.argc!=4 ){
    usage("FORMAT REPOSITORY-NAME");
  }
  if( g.argc==4 ){
    /* NOTE(review): pIn is never fclose()d on this path — confirm whether
    ** process exit makes that acceptable. */
    pIn = fossil_fopen(g.argv[3], "rb");
  }else{
    pIn = stdin;
    fossil_binary_mode(pIn);
  }
  if( !incrFlag ){
    if( forceFlag ) file_delete(g.argv[2]);
    db_create_repository(g.argv[2]);
  }
  db_open_repository(g.argv[2]);
  db_open_config(0);
  db_begin_transaction();
  if( !incrFlag ) db_initial_setup(0, 0, 0, 1);

  if( gitFlag ){
    /* The following temp-tables are used to hold information needed for
    ** the import.
    **
    ** The XMARK table provides a mapping from fast-import "marks" and symbols
    ** into artifact ids (UUIDs - the 40-byte hex SHA1 hash of artifacts).
    ** Given any valid fast-import symbol, the corresponding fossil rid and
    ** uuid can be found by searching against the xmark.tname field.
    **
    ** The XBRANCH table maps commit marks and symbols into the branch those
    ** commits belong to.  If xbranch.tname is a fast-import symbol for a
    ** checkin then xbranch.brnm is the branch that checkin is part of.
    **
    ** The XTAG table records information about tags that need to be applied
    ** to various branches after the import finishes.  The xtag.tcontent field
    ** contains the text of an artifact that will add a tag to a check-in.
    ** The git-fast-export file format might specify the same tag multiple
    ** times but only the last tag should be used.  And we do not know which
    ** occurrence of the tag is the last until the import finishes.
    */
    db_multi_exec(
       "CREATE TEMP TABLE xmark(tname TEXT UNIQUE, trid INT, tuuid TEXT);"
       "CREATE TEMP TABLE xbranch(tname TEXT UNIQUE, brnm TEXT);"
       "CREATE TEMP TABLE xtag(tname TEXT UNIQUE, tcontent TEXT);"
    );
    git_fast_import(pIn);
    /* Apply the deferred tag artifacts accumulated in xtag. */
    db_prepare(&q, "SELECT tcontent FROM xtag");
    while( db_step(&q)==SQLITE_ROW ){
      Blob record;
      db_ephemeral_blob(&q, 0, &record);
      fast_insert_content(&record, 0, 0);
      import_reset(0);
    }
    db_finalize(&q);
  }else if( svnFlag ){
    db_multi_exec(
       "CREATE TEMP TABLE xrevisions("
       " trev INT, tuser TEXT, tmsg TEXT, ttime DATETIME"
       ");"
       "CREATE TEMP TABLE xfiles("
       " trev INT, tpath TEXT, trid TEXT, tperm TEXT"
       ");"
    );
    svn_dump_import(pIn);
  }
  db_end_transaction(0);
  db_begin_transaction();
  fossil_print("Rebuilding repository meta-data...\n");
  rebuild_db(0, 1, !incrFlag);
  verify_cancel();
  db_end_transaction(0);
  fossil_print("Vacuuming...");
  fflush(stdout);
  db_multi_exec("VACUUM");
  fossil_print(" ok\n");
  if( !incrFlag ){
    fossil_print("project-id: %s\n", db_get("project-code", 0));
    fossil_print("server-id: %s\n", db_get("server-code", 0));
    zPassword = db_text(0, "SELECT pw FROM user WHERE login=%Q", g.zLogin);
    fossil_print("admin-user: %s (password is \"%s\")\n", g.zLogin, zPassword);
  }
}