#include "config.h"

#include "local-store.hh"
#include "globals.hh"
#include "archive.hh"
#include "pathlocks.hh"
#include "worker-protocol.hh"
#include "derivations.hh"
#include "immutable.hh"

#include <iostream>
#include <algorithm>

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <unistd.h>
#include <utime.h>
#include <fcntl.h>
#include <errno.h>
#include <stdio.h>
#include <time.h>

#if HAVE_UNSHARE
#include <sched.h>
#include <sys/mount.h>
#endif

#include <sqlite3.h>


namespace nix {


MakeError(SQLiteError, Error);
MakeError(SQLiteBusy, SQLiteError);


static void throwSQLiteError(sqlite3 * db, const format & f)
    __attribute__ ((noreturn));

static void throwSQLiteError(sqlite3 * db, const format & f)
{
    int err = sqlite3_errcode(db);
    if (err == SQLITE_BUSY) {
        printMsg(lvlError, "warning: SQLite database is busy");
        /* Sleep for a while since retrying the transaction right away
           is likely to fail again. */
#if HAVE_NANOSLEEP
        struct timespec t;
        t.tv_sec = 0;
        t.tv_nsec = 100 * 1000 * 1000; /* 0.1s */
        nanosleep(&t, 0);
#else
        sleep(1);
#endif
        throw SQLiteBusy(format("%1%: %2%") % f.str() % sqlite3_errmsg(db));
    }
    else
        throw SQLiteError(format("%1%: %2%") % f.str() % sqlite3_errmsg(db));
}
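
/* Illustrative note, not from the original source: SQLiteBusy signals a
   transient lock conflict, so callers that can safely redo their work are
   expected to catch it and retry, roughly like this:

       while (1) {
           try {
               SQLiteTxn txn(db);
               // ... bind and step statements ...
               txn.commit();
               break;
           } catch (SQLiteBusy & e) {
               // the database was locked; loop and try again
           }
       }

   registerValidPaths() below opens with exactly such a retry loop. */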


SQLite::~SQLite()
{
    try {
        if (db && sqlite3_close(db) != SQLITE_OK)
            throwSQLiteError(db, "closing database");
    } catch (...) {
        ignoreException();
    }
}


void SQLiteStmt::create(sqlite3 * db, const string & s)
{
    checkInterrupt();
    assert(!stmt);
    if (sqlite3_prepare_v2(db, s.c_str(), -1, &stmt, 0) != SQLITE_OK)
        throwSQLiteError(db, "creating statement");
    this->db = db;
}


void SQLiteStmt::reset()
{
    assert(stmt);
    /* Note: sqlite3_reset() returns the error code for the most
       recent call to sqlite3_step().  So ignore it. */
    sqlite3_reset(stmt);
    curArg = 1;
}


SQLiteStmt::~SQLiteStmt()
{
    try {
        if (stmt && sqlite3_finalize(stmt) != SQLITE_OK)
            throwSQLiteError(db, "finalizing statement");
    } catch (...) {
        ignoreException();
    }
}


void SQLiteStmt::bind(const string & value)
{
    if (sqlite3_bind_text(stmt, curArg++, value.c_str(), -1, SQLITE_TRANSIENT) != SQLITE_OK)
        throwSQLiteError(db, "binding argument");
}


void SQLiteStmt::bind(int value)
{
    if (sqlite3_bind_int(stmt, curArg++, value) != SQLITE_OK)
        throwSQLiteError(db, "binding argument");
}


void SQLiteStmt::bind64(long long value)
{
    if (sqlite3_bind_int64(stmt, curArg++, value) != SQLITE_OK)
        throwSQLiteError(db, "binding argument");
}


void SQLiteStmt::bind()
{
    if (sqlite3_bind_null(stmt, curArg++) != SQLITE_OK)
        throwSQLiteError(db, "binding argument");
}


/* Helper class to ensure that prepared statements are reset when
   leaving the scope that uses them.  Unfinished prepared statements
   prevent transactions from being aborted, and can cause locks to be
   kept when they should be released. */
struct SQLiteStmtUse
{
    SQLiteStmt & stmt;
    SQLiteStmtUse(SQLiteStmt & stmt) : stmt(stmt)
    {
        stmt.reset();
    }
    ~SQLiteStmtUse()
    {
        try {
            stmt.reset();
        } catch (...) {
            ignoreException();
        }
    }
};
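
/* Illustrative usage, not from the original source: the guard is declared
   before binding, so the statement is clean on entry and reset again on
   exit, even if an exception is thrown mid-query:

       SQLiteStmtUse use(stmtQueryPathInfo);
       stmtQueryPathInfo.bind(path);
       int r = sqlite3_step(stmtQueryPathInfo);
*/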


struct SQLiteTxn
{
    bool active;
    sqlite3 * db;

    SQLiteTxn(sqlite3 * db) : active(false) {
        this->db = db;
        if (sqlite3_exec(db, "begin;", 0, 0, 0) != SQLITE_OK)
            throwSQLiteError(db, "starting transaction");
        active = true;
    }

    void commit()
    {
        if (sqlite3_exec(db, "commit;", 0, 0, 0) != SQLITE_OK)
            throwSQLiteError(db, "committing transaction");
        active = false;
    }

    ~SQLiteTxn()
    {
        try {
            if (active && sqlite3_exec(db, "rollback;", 0, 0, 0) != SQLITE_OK)
                throwSQLiteError(db, "aborting transaction");
        } catch (...) {
            ignoreException();
        }
    }
};
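
/* Illustrative usage, not from the original source: SQLiteTxn is an RAII
   guard, so a transaction is rolled back automatically unless commit() is
   reached:

       {
           SQLiteTxn txn(db);
           // ... execute statements ...
           txn.commit();   // skipped on exception => destructor rolls back
       }
*/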


void checkStoreNotSymlink()
{
    if (getEnv("NIX_IGNORE_SYMLINK_STORE") == "1") return;
    Path path = settings.nixStore;
    struct stat st;
    while (path != "/") {
        if (lstat(path.c_str(), &st))
            throw SysError(format("getting status of `%1%'") % path);
        if (S_ISLNK(st.st_mode))
            throw Error(format(
                "the path `%1%' is a symlink; "
                "this is not allowed for the Nix store and its parent directories")
                % path);
        path = dirOf(path);
    }
}
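
/* For example (illustrative): with a store at /nix/store, the loop above
   lstat()s /nix/store, then /nix, and stops once dirOf() yields "/". */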


LocalStore::LocalStore(bool reserveSpace)
{
    schemaPath = settings.nixDBPath + "/schema";

    if (settings.readOnlyMode) {
        openDB(false);
        return;
    }

    /* Create the state directories if they don't already exist. */
    createDirs(settings.nixStore);
    makeStoreWritable();
    createDirs(linksDir = settings.nixStore + "/.links");
    Path profilesDir = settings.nixStateDir + "/profiles";
    createDirs(profilesDir);
    createDirs(settings.nixStateDir + "/temproots");
    createDirs(settings.nixDBPath);
    Path gcRootsDir = settings.nixStateDir + "/gcroots";
    if (!pathExists(gcRootsDir)) {
        createDirs(gcRootsDir);
        if (symlink(profilesDir.c_str(), (gcRootsDir + "/profiles").c_str()) == -1)
            throw SysError(format("creating symlink to `%1%'") % profilesDir);
    }

    checkStoreNotSymlink();

    /* We can't open a SQLite database if the disk is full.  Since
       this prevents the garbage collector from running when it's most
       needed, we reserve some dummy space that we can free just
       before doing a garbage collection. */
    try {
        Path reservedPath = settings.nixDBPath + "/reserved";
        if (reserveSpace) {
            struct stat st;
            if (stat(reservedPath.c_str(), &st) == -1 ||
                st.st_size != settings.reservedSize)
                writeFile(reservedPath, string(settings.reservedSize, 'X'));
        }
        else
            deletePath(reservedPath);
    } catch (SysError & e) { /* don't care about errors */
    }

    /* Acquire the big fat lock in shared mode to make sure that no
       schema upgrade is in progress. */
    try {
        Path globalLockPath = settings.nixDBPath + "/big-lock";
        globalLock = openLockFile(globalLockPath.c_str(), true);
    } catch (SysError & e) {
        if (e.errNo != EACCES) throw;
        settings.readOnlyMode = true;
        openDB(false);
        return;
    }

    if (!lockFile(globalLock, ltRead, false)) {
        printMsg(lvlError, "waiting for the big Nix store lock...");
        lockFile(globalLock, ltRead, true);
    }

    /* Check the current database schema and if necessary do an
       upgrade. */
    int curSchema = getSchema();
    if (curSchema > nixSchemaVersion)
        throw Error(format("current Nix store schema is version %1%, but I only support %2%")
            % curSchema % nixSchemaVersion);

    else if (curSchema == 0) { /* new store */
        curSchema = nixSchemaVersion;
        openDB(true);
        writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str());
    }

    else if (curSchema < nixSchemaVersion) {
        if (curSchema < 5)
            throw Error(
                "Your Nix store has a database in Berkeley DB format,\n"
                "which is no longer supported. To convert to the new format,\n"
                "please upgrade Nix to version 0.12 first.");

        if (!lockFile(globalLock, ltWrite, false)) {
            printMsg(lvlError, "waiting for exclusive access to the Nix store...");
            lockFile(globalLock, ltWrite, true);
        }

        /* Get the schema version again, because another process may
           have performed the upgrade already. */
        curSchema = getSchema();

        if (curSchema < 6) upgradeStore6();

        writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str());

        lockFile(globalLock, ltRead, true);
    }

    else openDB(false);
}


LocalStore::~LocalStore()
{
    try {
        foreach (RunningSubstituters::iterator, i, runningSubstituters) {
            i->second.to.close();
            i->second.from.close();
            i->second.pid.wait(true);
        }
    } catch (...) {
        ignoreException();
    }
}


int LocalStore::getSchema()
{
    int curSchema = 0;
    if (pathExists(schemaPath)) {
        string s = readFile(schemaPath);
        if (!string2Int(s, curSchema))
            throw Error(format("`%1%' is corrupt") % schemaPath);
    }
    return curSchema;
}


void LocalStore::openDB(bool create)
{
    /* Open the Nix database. */
    if (sqlite3_open_v2((settings.nixDBPath + "/db.sqlite").c_str(), &db.db,
            SQLITE_OPEN_READWRITE | (create ? SQLITE_OPEN_CREATE : 0), 0) != SQLITE_OK)
        throw Error("cannot open SQLite database");

    if (sqlite3_busy_timeout(db, 60 * 60 * 1000) != SQLITE_OK)
        throwSQLiteError(db, "setting timeout");

    if (sqlite3_exec(db, "pragma foreign_keys = 1;", 0, 0, 0) != SQLITE_OK)
        throwSQLiteError(db, "enabling foreign keys");

    /* !!! check whether sqlite has been built with foreign key
       support */

    /* Whether SQLite should fsync().  "Normal" synchronous mode
       should be safe enough.  If the user asks for it, don't sync at
       all.  This can cause database corruption if the system
       crashes. */
    string syncMode = settings.fsyncMetadata ? "normal" : "off";
    if (sqlite3_exec(db, ("pragma synchronous = " + syncMode + ";").c_str(), 0, 0, 0) != SQLITE_OK)
        throwSQLiteError(db, "setting synchronous mode");

    /* Set the SQLite journal mode.  WAL mode is fastest, so it's the
       default. */
    string mode = settings.useSQLiteWAL ? "wal" : "truncate";
    string prevMode;
    {
        SQLiteStmt stmt;
        stmt.create(db, "pragma main.journal_mode;");
        if (sqlite3_step(stmt) != SQLITE_ROW)
            throwSQLiteError(db, "querying journal mode");
        prevMode = string((const char *) sqlite3_column_text(stmt, 0));
    }
    if (prevMode != mode &&
        sqlite3_exec(db, ("pragma main.journal_mode = " + mode + ";").c_str(), 0, 0, 0) != SQLITE_OK)
        throwSQLiteError(db, "setting journal mode");

    /* Increase the auto-checkpoint interval to 8192 pages.  This
       seems enough to ensure that instantiating the NixOS system
       derivation is done in a single fsync(). */
    if (mode == "wal" && sqlite3_exec(db, "pragma wal_autocheckpoint = 8192;", 0, 0, 0) != SQLITE_OK)
        throwSQLiteError(db, "setting autocheckpoint interval");

    /* Initialise the database schema, if necessary. */
    if (create) {
#include "schema.sql.hh"
        if (sqlite3_exec(db, (const char *) schema, 0, 0, 0) != SQLITE_OK)
            throwSQLiteError(db, "initialising database schema");
    }

    /* Prepare SQL statements. */
    stmtRegisterValidPath.create(db,
        "insert into ValidPaths (path, hash, registrationTime, deriver, narSize) values (?, ?, ?, ?, ?);");
    stmtUpdatePathInfo.create(db,
        "update ValidPaths set narSize = ?, hash = ? where path = ?;");
    stmtAddReference.create(db,
        "insert or replace into Refs (referrer, reference) values (?, ?);");
    stmtQueryPathInfo.create(db,
        "select id, hash, registrationTime, deriver, narSize from ValidPaths where path = ?;");
    stmtQueryReferences.create(db,
        "select path from Refs join ValidPaths on reference = id where referrer = ?;");
    stmtQueryReferrers.create(db,
        "select path from Refs join ValidPaths on referrer = id where reference = (select id from ValidPaths where path = ?);");
    stmtInvalidatePath.create(db,
        "delete from ValidPaths where path = ?;");
    stmtRegisterFailedPath.create(db,
        "insert into FailedPaths (path, time) values (?, ?);");
    stmtHasPathFailed.create(db,
        "select time from FailedPaths where path = ?;");
    stmtQueryFailedPaths.create(db,
        "select path from FailedPaths;");
    // If the path is a derivation, then clear its outputs.
    stmtClearFailedPath.create(db,
        "delete from FailedPaths where ?1 = '*' or path = ?1 "
        "or path in (select d.path from DerivationOutputs d join ValidPaths v on d.drv = v.id where v.path = ?1);");
    stmtAddDerivationOutput.create(db,
        "insert or replace into DerivationOutputs (drv, id, path) values (?, ?, ?);");
    stmtQueryValidDerivers.create(db,
        "select v.id, v.path from DerivationOutputs d join ValidPaths v on d.drv = v.id where d.path = ?;");
    stmtQueryDerivationOutputs.create(db,
        "select id, path from DerivationOutputs where drv = ?;");
    // Use "path >= ?" with limit 1 rather than "path like '?%'" to
    // ensure efficient lookup.
    stmtQueryPathFromHashPart.create(db,
        "select path from ValidPaths where path >= ? limit 1;");
}
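
/* Illustrative note, not from the original source: the statements above
   use SQLite's parameter syntax.  A bare "?" binds arguments positionally
   in the order of the bind() calls (SQLiteStmt::bind() increments curArg),
   while the numbered form "?1" in stmtClearFailedPath names the first
   argument explicitly so one bound value can be referenced several times
   within the same statement. */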


/* To improve purity, users may want to make the Nix store a read-only
   bind mount.  So make the Nix store writable for this process. */
void LocalStore::makeStoreWritable()
{
#if HAVE_UNSHARE
    if (getuid() != 0) return;

    if (!pathExists("/proc/self/mountinfo")) return;

    /* Check if /nix/store is a read-only bind mount. */
    bool found = false;
    Strings mounts = tokenizeString<Strings>(readFile("/proc/self/mountinfo", true), "\n");
    foreach (Strings::iterator, i, mounts) {
        vector<string> fields = tokenizeString<vector<string> >(*i, " ");
        if (fields.at(3) == "/" || fields.at(4) != settings.nixStore) continue;
        Strings options = tokenizeString<Strings>(fields.at(5), ",");
        if (std::find(options.begin(), options.end(), "ro") == options.end()) continue;
        found = true;
        break;
    }

    if (!found) return;

    if (unshare(CLONE_NEWNS) == -1)
        throw SysError("setting up a private mount namespace");

    if (mount(0, settings.nixStore.c_str(), 0, MS_REMOUNT | MS_BIND, 0) == -1)
        throw SysError(format("remounting %1% writable") % settings.nixStore);
#endif
}
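
/* Illustrative /proc/self/mountinfo line (hypothetical IDs and device):

       42 21 0:35 /store /nix/store ro,relatime - ext4 /dev/sda1 rw

   After splitting on spaces, field 3 is the root of the mount, field 4
   the mount point and field 5 the per-mount options, which is where the
   loop above looks for "ro". */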


const time_t mtimeStore = 1; /* 1 second into the epoch */


void canonicalisePathMetaData(const Path & path, bool recurse)
{
    checkInterrupt();

    struct stat st;
    if (lstat(path.c_str(), &st))
        throw SysError(format("getting attributes of path `%1%'") % path);

    /* Really make sure that the path is of a supported type.  This
       has already been checked in dumpPath(). */
    assert(S_ISREG(st.st_mode) || S_ISDIR(st.st_mode) || S_ISLNK(st.st_mode));

    /* Change ownership to the current uid.  If it's a symlink, use
       lchown if available, otherwise don't bother.  Wrong ownership
       of a symlink doesn't matter, since the owning user can't change
       the symlink and can't delete it because the directory is not
       writable.  The only exception is top-level paths in the Nix
       store (since that directory is group-writable for the Nix build
       users group); we check for this case below. */
    if (st.st_uid != geteuid()) {
#if HAVE_LCHOWN
        if (lchown(path.c_str(), geteuid(), (gid_t) -1) == -1)
#else
        if (!S_ISLNK(st.st_mode) &&
            chown(path.c_str(), geteuid(), (gid_t) -1) == -1)
#endif
            throw SysError(format("changing owner of `%1%' to %2%")
                % path % geteuid());
    }

    if (!S_ISLNK(st.st_mode)) {

        /* Mask out all type related bits. */
        mode_t mode = st.st_mode & ~S_IFMT;

        if (mode != 0444 && mode != 0555) {
            mode = (st.st_mode & S_IFMT)
                | 0444
                | (st.st_mode & S_IXUSR ? 0111 : 0);
            if (chmod(path.c_str(), mode) == -1)
                throw SysError(format("changing mode of `%1%' to %2$o") % path % mode);
        }

    }

    if (st.st_mtime != mtimeStore) {
        struct timeval times[2];
        times[0].tv_sec = st.st_atime;
        times[0].tv_usec = 0;
        times[1].tv_sec = mtimeStore;
        times[1].tv_usec = 0;
#if HAVE_LUTIMES
        if (lutimes(path.c_str(), times) == -1)
#else
        if (!S_ISLNK(st.st_mode) && utimes(path.c_str(), times) == -1)
#endif
            throw SysError(format("changing modification time of `%1%'") % path);
    }

    if (recurse && S_ISDIR(st.st_mode)) {
        Strings names = readDirectory(path);
        foreach (Strings::iterator, i, names)
            canonicalisePathMetaData(path + "/" + *i, true);
    }
}
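
/* For example (illustrative): the mode canonicalisation above maps 0644 to
   0444 and 0755 to 0555, i.e. everything becomes read-only for all users,
   and the execute bits are kept only if the owner had execute permission. */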


void canonicalisePathMetaData(const Path & path)
{
    canonicalisePathMetaData(path, true);

    /* On platforms that don't have lchown(), the top-level path can't
       be a symlink, since we can't change its ownership. */
    struct stat st;
    if (lstat(path.c_str(), &st))
        throw SysError(format("getting attributes of path `%1%'") % path);

    if (st.st_uid != geteuid()) {
        assert(S_ISLNK(st.st_mode));
        throw Error(format("wrong ownership of top-level store path `%1%'") % path);
    }
}


void LocalStore::checkDerivationOutputs(const Path & drvPath, const Derivation & drv)
{
    string drvName = storePathToName(drvPath);
    assert(isDerivation(drvName));
    drvName = string(drvName, 0, drvName.size() - drvExtension.size());

    if (isFixedOutputDrv(drv)) {
        DerivationOutputs::const_iterator out = drv.outputs.find("out");
        if (out == drv.outputs.end())
            throw Error(format("derivation `%1%' does not have an output named `out'") % drvPath);

        bool recursive; HashType ht; Hash h;
        out->second.parseHashInfo(recursive, ht, h);
        Path outPath = makeFixedOutputPath(recursive, ht, h, drvName);

        StringPairs::const_iterator j = drv.env.find("out");
        if (out->second.path != outPath || j == drv.env.end() || j->second != outPath)
            throw Error(format("derivation `%1%' has incorrect output `%2%', should be `%3%'")
                % drvPath % out->second.path % outPath);
    }

    else {
        Derivation drvCopy(drv);
        foreach (DerivationOutputs::iterator, i, drvCopy.outputs) {
            i->second.path = "";
            drvCopy.env[i->first] = "";
        }

        Hash h = hashDerivationModulo(*this, drvCopy);

        foreach (DerivationOutputs::const_iterator, i, drv.outputs) {
            Path outPath = makeOutputPath(i->first, h, drvName);
            StringPairs::const_iterator j = drv.env.find(i->first);
            if (i->second.path != outPath || j == drv.env.end() || j->second != outPath)
                throw Error(format("derivation `%1%' has incorrect output `%2%', should be `%3%'")
                    % drvPath % i->second.path % outPath);
        }
    }
}


unsigned long long LocalStore::addValidPath(const ValidPathInfo & info, bool checkOutputs)
{
    SQLiteStmtUse use(stmtRegisterValidPath);
    stmtRegisterValidPath.bind(info.path);
    stmtRegisterValidPath.bind("sha256:" + printHash(info.hash));
    stmtRegisterValidPath.bind(info.registrationTime == 0 ? time(0) : info.registrationTime);
    if (info.deriver != "")
        stmtRegisterValidPath.bind(info.deriver);
    else
        stmtRegisterValidPath.bind(); // null
    if (info.narSize != 0)
        stmtRegisterValidPath.bind64(info.narSize);
    else
        stmtRegisterValidPath.bind(); // null
    if (sqlite3_step(stmtRegisterValidPath) != SQLITE_DONE)
        throwSQLiteError(db, format("registering valid path `%1%' in database") % info.path);
    unsigned long long id = sqlite3_last_insert_rowid(db);

    /* If this is a derivation, then store the derivation outputs in
       the database.  This is useful for the garbage collector: it can
       efficiently query whether a path is an output of some
       derivation. */
    if (isDerivation(info.path)) {
        Derivation drv = parseDerivation(readFile(info.path));

        /* Verify that the output paths in the derivation are correct
           (i.e., follow the scheme for computing output paths from
           derivations).  Note that if this throws an error, then the
           DB transaction is rolled back, so the path validity
           registration above is undone. */
        if (checkOutputs) checkDerivationOutputs(info.path, drv);

        foreach (DerivationOutputs::iterator, i, drv.outputs) {
            SQLiteStmtUse use(stmtAddDerivationOutput);
            stmtAddDerivationOutput.bind(id);
            stmtAddDerivationOutput.bind(i->first);
            stmtAddDerivationOutput.bind(i->second.path);
            if (sqlite3_step(stmtAddDerivationOutput) != SQLITE_DONE)
                throwSQLiteError(db, format("adding derivation output for `%1%' in database") % info.path);
        }
    }

    return id;
}


void LocalStore::addReference(unsigned long long referrer, unsigned long long reference)
{
    SQLiteStmtUse use(stmtAddReference);
    stmtAddReference.bind(referrer);
    stmtAddReference.bind(reference);
    if (sqlite3_step(stmtAddReference) != SQLITE_DONE)
        throwSQLiteError(db, "adding reference to database");
}


void LocalStore::registerFailedPath(const Path & path)
{
    if (hasPathFailed(path)) return;
    SQLiteStmtUse use(stmtRegisterFailedPath);
    stmtRegisterFailedPath.bind(path);
    stmtRegisterFailedPath.bind(time(0));
    if (sqlite3_step(stmtRegisterFailedPath) != SQLITE_DONE)
        throwSQLiteError(db, format("registering failed path `%1%'") % path);
}


bool LocalStore::hasPathFailed(const Path & path)
{
    SQLiteStmtUse use(stmtHasPathFailed);
    stmtHasPathFailed.bind(path);
    int res = sqlite3_step(stmtHasPathFailed);
    if (res != SQLITE_DONE && res != SQLITE_ROW)
        throwSQLiteError(db, "querying whether path failed");
    return res == SQLITE_ROW;
}


PathSet LocalStore::queryFailedPaths()
{
    SQLiteStmtUse use(stmtQueryFailedPaths);

    PathSet res;
    int r;
    while ((r = sqlite3_step(stmtQueryFailedPaths)) == SQLITE_ROW) {
        const char * s = (const char *) sqlite3_column_text(stmtQueryFailedPaths, 0);
        assert(s);
        res.insert(s);
    }

    if (r != SQLITE_DONE)
        throwSQLiteError(db, "error querying failed paths");

    return res;
}


void LocalStore::clearFailedPaths(const PathSet & paths)
{
    SQLiteTxn txn(db);

    foreach (PathSet::const_iterator, i, paths) {
        SQLiteStmtUse use(stmtClearFailedPath);
        stmtClearFailedPath.bind(*i);
        if (sqlite3_step(stmtClearFailedPath) != SQLITE_DONE)
            throwSQLiteError(db, format("clearing failed path `%1%' in database") % *i);
    }

    txn.commit();
}


Hash parseHashField(const Path & path, const string & s)
{
    string::size_type colon = s.find(':');
    if (colon == string::npos)
        throw Error(format("corrupt hash `%1%' in valid-path entry for `%2%'")
            % s % path);
    HashType ht = parseHashType(string(s, 0, colon));
    if (ht == htUnknown)
        throw Error(format("unknown hash type `%1%' in valid-path entry for `%2%'")
            % string(s, 0, colon) % path);
    return parseHash(ht, string(s, colon + 1));
}
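
/* For example (illustrative): addValidPath() above stores hash fields of
   the form "sha256:" followed by the printed digest, so the part before
   the colon names the hash type and the remainder is handed to
   parseHash(). */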


ValidPathInfo LocalStore::queryPathInfo(const Path & path)
{
    ValidPathInfo info;
    info.path = path;

    assertStorePath(path);

    /* Get the path info. */
    SQLiteStmtUse use1(stmtQueryPathInfo);

    stmtQueryPathInfo.bind(path);

    int r = sqlite3_step(stmtQueryPathInfo);
    if (r == SQLITE_DONE) throw Error(format("path `%1%' is not valid") % path);
    if (r != SQLITE_ROW) throwSQLiteError(db, "querying path in database");

    info.id = sqlite3_column_int(stmtQueryPathInfo, 0);

    const char * s = (const char *) sqlite3_column_text(stmtQueryPathInfo, 1);
    assert(s);
    info.hash = parseHashField(path, s);

    info.registrationTime = sqlite3_column_int(stmtQueryPathInfo, 2);

    s = (const char *) sqlite3_column_text(stmtQueryPathInfo, 3);
    if (s) info.deriver = s;

    /* Note that narSize = NULL yields 0. */
    info.narSize = sqlite3_column_int64(stmtQueryPathInfo, 4);

    /* Get the references. */
    SQLiteStmtUse use2(stmtQueryReferences);

    stmtQueryReferences.bind(info.id);

    while ((r = sqlite3_step(stmtQueryReferences)) == SQLITE_ROW) {
        s = (const char *) sqlite3_column_text(stmtQueryReferences, 0);
        assert(s);
        info.references.insert(s);
    }

    if (r != SQLITE_DONE)
        throwSQLiteError(db, format("error getting references of `%1%'") % path);

    return info;
}


/* Update path info in the database.  Currently only updates the
   narSize and hash fields. */
void LocalStore::updatePathInfo(const ValidPathInfo & info)
{
    SQLiteStmtUse use(stmtUpdatePathInfo);
    if (info.narSize != 0)
        stmtUpdatePathInfo.bind64(info.narSize);
    else
        stmtUpdatePathInfo.bind(); // null
    stmtUpdatePathInfo.bind("sha256:" + printHash(info.hash));
    stmtUpdatePathInfo.bind(info.path);
    if (sqlite3_step(stmtUpdatePathInfo) != SQLITE_DONE)
        throwSQLiteError(db, format("updating info of path `%1%' in database") % info.path);
}


unsigned long long LocalStore::queryValidPathId(const Path & path)
{
    SQLiteStmtUse use(stmtQueryPathInfo);
    stmtQueryPathInfo.bind(path);
    int res = sqlite3_step(stmtQueryPathInfo);
    if (res == SQLITE_ROW) return sqlite3_column_int(stmtQueryPathInfo, 0);
    if (res == SQLITE_DONE) throw Error(format("path `%1%' is not valid") % path);
    throwSQLiteError(db, "querying path in database");
}


bool LocalStore::isValidPath(const Path & path)
{
    SQLiteStmtUse use(stmtQueryPathInfo);
    stmtQueryPathInfo.bind(path);
    int res = sqlite3_step(stmtQueryPathInfo);
    if (res != SQLITE_DONE && res != SQLITE_ROW)
        throwSQLiteError(db, "querying path in database");
    return res == SQLITE_ROW;
}


PathSet LocalStore::queryValidPaths(const PathSet & paths)
{
    PathSet res;
    foreach (PathSet::const_iterator, i, paths)
        if (isValidPath(*i)) res.insert(*i);
    return res;
}


PathSet LocalStore::queryAllValidPaths()
{
    SQLiteStmt stmt;
    stmt.create(db, "select path from ValidPaths");

    PathSet res;

    int r;
    while ((r = sqlite3_step(stmt)) == SQLITE_ROW) {
        const char * s = (const char *) sqlite3_column_text(stmt, 0);
        assert(s);
        res.insert(s);
    }

    if (r != SQLITE_DONE)
        throwSQLiteError(db, "error getting valid paths");

    return res;
}


void LocalStore::queryReferences(const Path & path,
    PathSet & references)
{
    ValidPathInfo info = queryPathInfo(path);
    references.insert(info.references.begin(), info.references.end());
}


void LocalStore::queryReferrers(const Path & path, PathSet & referrers)
{
    assertStorePath(path);

    SQLiteStmtUse use(stmtQueryReferrers);

    stmtQueryReferrers.bind(path);

    int r;
    while ((r = sqlite3_step(stmtQueryReferrers)) == SQLITE_ROW) {
        const char * s = (const char *) sqlite3_column_text(stmtQueryReferrers, 0);
        assert(s);
        referrers.insert(s);
    }

    if (r != SQLITE_DONE)
        throwSQLiteError(db, format("error getting referrers of `%1%'") % path);
}


Path LocalStore::queryDeriver(const Path & path)
{
    return queryPathInfo(path).deriver;
}


PathSet LocalStore::queryValidDerivers(const Path & path)
{
    assertStorePath(path);

    SQLiteStmtUse use(stmtQueryValidDerivers);
    stmtQueryValidDerivers.bind(path);

    PathSet derivers;
    int r;
    while ((r = sqlite3_step(stmtQueryValidDerivers)) == SQLITE_ROW) {
        const char * s = (const char *) sqlite3_column_text(stmtQueryValidDerivers, 1);
        assert(s);
        derivers.insert(s);
    }

    if (r != SQLITE_DONE)
        throwSQLiteError(db, format("error getting valid derivers of `%1%'") % path);

    return derivers;
}


PathSet LocalStore::queryDerivationOutputs(const Path & path)
{
    SQLiteTxn txn(db);

    SQLiteStmtUse use(stmtQueryDerivationOutputs);
    stmtQueryDerivationOutputs.bind(queryValidPathId(path));

    PathSet outputs;
    int r;
    while ((r = sqlite3_step(stmtQueryDerivationOutputs)) == SQLITE_ROW) {
        const char * s = (const char *) sqlite3_column_text(stmtQueryDerivationOutputs, 1);
        assert(s);
        outputs.insert(s);
    }

    if (r != SQLITE_DONE)
        throwSQLiteError(db, format("error getting outputs of `%1%'") % path);

    return outputs;
}


StringSet LocalStore::queryDerivationOutputNames(const Path & path)
{
    SQLiteTxn txn(db);

    SQLiteStmtUse use(stmtQueryDerivationOutputs);
    stmtQueryDerivationOutputs.bind(queryValidPathId(path));

    StringSet outputNames;
    int r;
    while ((r = sqlite3_step(stmtQueryDerivationOutputs)) == SQLITE_ROW) {
        const char * s = (const char *) sqlite3_column_text(stmtQueryDerivationOutputs, 0);
        assert(s);
        outputNames.insert(s);
    }

    if (r != SQLITE_DONE)
        throwSQLiteError(db, format("error getting output names of `%1%'") % path);

    return outputNames;
}


Path LocalStore::queryPathFromHashPart(const string & hashPart)
{
    if (hashPart.size() != 32) throw Error("invalid hash part");

    SQLiteTxn txn(db);

    Path prefix = settings.nixStore + "/" + hashPart;

    SQLiteStmtUse use(stmtQueryPathFromHashPart);
    stmtQueryPathFromHashPart.bind(prefix);

    int res = sqlite3_step(stmtQueryPathFromHashPart);
    if (res == SQLITE_DONE) return "";
    if (res != SQLITE_ROW) throwSQLiteError(db, "finding path in database");

    const char * s = (const char *) sqlite3_column_text(stmtQueryPathFromHashPart, 0);
    return s && prefix.compare(0, prefix.size(), s, prefix.size()) == 0 ? s : "";
}
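
/* Illustrative note, not from the original source: store paths have the
   form <store>/<32-character hash part>-<name>, so "path >= ?" on the
   prefix "<store>/<hashPart>" jumps straight to the first candidate row
   via the index, and the final compare() rejects the case where the
   lexicographically next path does not actually start with the prefix. */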


void LocalStore::startSubstituter(const Path & substituter, RunningSubstituter & run)
{
    if (run.pid != -1) return;

    debug(format("starting substituter program `%1%'") % substituter);

    Pipe toPipe, fromPipe, errorPipe;

    toPipe.create();
    fromPipe.create();
    errorPipe.create();

    run.pid = fork();

    switch (run.pid) {

    case -1:
        throw SysError("unable to fork");

    case 0: /* child */
        try {
            /* Hack to let "make check" succeed on Darwin.  The
               libtool wrapper script sets DYLD_LIBRARY_PATH to our
               libutil (among others), but Perl also depends on a
               library named libutil.  As a result, substituters
               written in Perl (i.e. all of them) fail. */
            unsetenv("DYLD_LIBRARY_PATH");

            /* Pass configuration options (including those overridden
               with --option) to the substituter. */
            setenv("_NIX_OPTIONS", settings.pack().c_str(), 1);

            fromPipe.readSide.close();
            toPipe.writeSide.close();
            if (dup2(toPipe.readSide, STDIN_FILENO) == -1)
                throw SysError("dupping stdin");
            if (dup2(fromPipe.writeSide, STDOUT_FILENO) == -1)
                throw SysError("dupping stdout");
            if (dup2(errorPipe.writeSide, STDERR_FILENO) == -1)
                throw SysError("dupping stderr");
            closeMostFDs(set<int>());
            execl(substituter.c_str(), substituter.c_str(), "--query", NULL);
            throw SysError(format("executing `%1%'") % substituter);
        } catch (std::exception & e) {
            std::cerr << "error: " << e.what() << std::endl;
        }
        quickExit(1);
    }

    /* Parent. */

    run.to = toPipe.writeSide.borrow();
    run.from = fromPipe.readSide.borrow();
    run.error = errorPipe.readSide.borrow();
}


template<class T> T getIntLine(int fd)
{
    string s = readLine(fd);
    T res;
    if (!string2Int(s, res)) throw Error("integer expected from stream");
    return res;
}


PathSet LocalStore::querySubstitutablePaths(const PathSet & paths)
{
    PathSet res;
    foreach (Paths::iterator, i, settings.substituters) {
        if (res.size() == paths.size()) break;
        RunningSubstituter & run(runningSubstituters[*i]);
        startSubstituter(*i, run);
        string s = "have ";
        foreach (PathSet::const_iterator, j, paths)
            if (res.find(*j) == res.end()) { s += *j; s += " "; }
        writeLine(run.to, s);
        while (true) {
            /* FIXME: we only read stderr when an error occurs, so
               substituters should only write (short) messages to
               stderr when they fail.  I.e. they shouldn't write debug
               output. */
            try {
                Path path = readLine(run.from);
                if (path == "") break;
                res.insert(path);
            } catch (EndOfFile e) {
                throw Error(format("substituter `%1%' failed: %2%") % *i % chomp(drainFD(run.error)));
            }
        }
    }
    return res;
}
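
/* Illustrative exchange for the "have" query above (hypothetical store
   paths): the substituter echoes back the subset of paths it can supply
   and ends its reply with an empty line:

       to substituter:   have /nix/store/aaa-foo /nix/store/bbb-bar
       from substituter: /nix/store/aaa-foo
       from substituter: (empty line)
*/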


void LocalStore::querySubstitutablePathInfos(const Path & substituter,
    PathSet & paths, SubstitutablePathInfos & infos)
{
    RunningSubstituter & run(runningSubstituters[substituter]);
    startSubstituter(substituter, run);

    string s = "info ";
    foreach (PathSet::const_iterator, i, paths)
        if (infos.find(*i) == infos.end()) { s += *i; s += " "; }
    writeLine(run.to, s);

    while (true) {
        try {
            Path path = readLine(run.from);
            if (path == "") break;
            if (paths.find(path) == paths.end())
                throw Error(format("got unexpected path `%1%' from substituter") % path);
            paths.erase(path);
            SubstitutablePathInfo & info(infos[path]);
            info.deriver = readLine(run.from);
            if (info.deriver != "") assertStorePath(info.deriver);
            int nrRefs = getIntLine<int>(run.from);
            while (nrRefs--) {
                Path p = readLine(run.from);
                assertStorePath(p);
                info.references.insert(p);
            }
            info.downloadSize = getIntLine<long long>(run.from);
            info.narSize = getIntLine<long long>(run.from);
        } catch (EndOfFile e) {
            throw Error(format("substituter `%1%' failed: %2%") % substituter % chomp(drainFD(run.error)));
        }
    }
}
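
/* Illustrative reply to an "info" query (hypothetical store paths),
   matching the reads above: for each known path the substituter sends the
   path, its deriver (possibly empty), the number of references followed
   by that many reference paths, the download size and the NAR size; an
   empty line ends the whole reply:

       /nix/store/aaa-foo
       /nix/store/ccc-foo.drv
       2
       /nix/store/ddd-dep1
       /nix/store/eee-dep2
       12345
       67890
       (empty line)
*/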


void LocalStore::querySubstitutablePathInfos(const PathSet & paths,
    SubstitutablePathInfos & infos)
{
    PathSet todo = paths;
    foreach (Paths::iterator, i, settings.substituters) {
        if (todo.empty()) break;
        querySubstitutablePathInfos(*i, todo, infos);
    }
}
|
|
|
|
|
|
|
|
|
2008-06-09 15:52:45 +02:00
|
|
|
Hash LocalStore::queryPathHash(const Path & path)
|
2005-02-09 10:50:29 +01:00
|
|
|
{
|
2008-06-09 15:52:45 +02:00
|
|
|
return queryPathInfo(path).hash;
|
2005-02-09 10:50:29 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-12-14 14:25:20 +01:00
|
|
|
void LocalStore::registerValidPath(const ValidPathInfo & info)
|
|
|
|
{
|
|
|
|
ValidPathInfos infos;
|
|
|
|
infos.push_back(info);
|
|
|
|
registerValidPaths(infos);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-02-24 13:48:00 +01:00
|
|
|
void LocalStore::registerValidPaths(const ValidPathInfos & infos)
|
2005-03-02 16:57:06 +01:00
|
|
|
{
|
2012-07-31 01:55:41 +02:00
|
|
|
/* SQLite will fsync by default, but the new valid paths may not be fsync-ed.
|
2012-03-22 20:05:54 +01:00
|
|
|
* So the user may want to fsync them before registering their validity, at the
|
|
|
|
* expense of slowing down the registration operation. */
|
2012-07-31 01:55:41 +02:00
|
|
|
if (settings.syncBeforeRegistering) sync();
|
2012-03-22 20:05:54 +01:00
|
|
|
|
2010-12-14 14:25:20 +01:00
|
|
|
while (1) {
|
|
|
|
try {
|
|
|
|
SQLiteTxn txn(db);
|
2011-12-30 15:47:14 +01:00
|
|
|
PathSet paths;
|
2012-07-30 21:42:18 +02:00
|
|
|
|
2010-12-14 14:25:20 +01:00
|
|
|
foreach (ValidPathInfos::const_iterator, i, infos) {
|
|
|
|
assert(i->hash.type == htSHA256);
|
|
|
|
/* !!! Maybe the registration info should be updated if the
|
|
|
|
path is already valid. */
|
|
|
|
if (!isValidPath(i->path)) addValidPath(*i);
|
2011-12-30 15:47:14 +01:00
|
|
|
paths.insert(i->path);
|
2010-12-14 14:25:20 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
foreach (ValidPathInfos::const_iterator, i, infos) {
|
|
|
|
unsigned long long referrer = queryValidPathId(i->path);
|
|
|
|
foreach (PathSet::iterator, j, i->references)
|
|
|
|
addReference(referrer, queryValidPathId(*j));
|
|
|
|
}
|
2005-01-19 17:39:47 +01:00
|
|
|
|
2011-12-30 15:47:14 +01:00
|
|
|
/* Do a topological sort of the paths. This will throw an
|
|
|
|
error if a cycle is detected and roll back the
|
|
|
|
transaction. Cycles can only occur when a derivation
|
|
|
|
has multiple outputs. */
|
|
|
|
topoSortPaths(*this, paths);
|
|
|
|
|
2010-12-14 14:25:20 +01:00
|
|
|
txn.commit();
|
|
|
|
break;
|
|
|
|
} catch (SQLiteBusy & e) {
|
|
|
|
/* Retry; the `txn' destructor will roll back the current
|
|
|
|
transaction. */
|
|
|
|
}
|
2010-02-24 13:48:00 +01:00
|
|
|
}
|
2003-10-08 17:06:59 +02:00
|
|
|
}
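The while/try/catch idiom above recurs in several methods of this file (see also invalidatePathChecked() below). As a standalone sketch, the pattern generalises to a retry wrapper like this (retrySQLite is a name invented here for illustration, not a helper that exists in this version of the code):

struct SQLiteBusy {};  // stand-in for the SQLiteBusy exception above

template<typename F>
void retrySQLite(F body)
{
    while (true) {
        try {
            body();   // open a transaction, do the work, commit
            break;    // committed successfully: done
        } catch (SQLiteBusy &) {
            /* The transaction object's destructor has rolled back;
               throwSQLiteError() already slept briefly, so just retry. */
        }
    }
}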
|
2003-07-07 11:25:26 +02:00
|
|
|
|
2003-07-31 18:05:35 +02:00
|
|
|
|
2005-01-31 15:00:43 +01:00
|
|
|
/* Invalidate a path. The caller is responsible for checking that
|
2005-12-13 22:04:48 +01:00
|
|
|
there are no referrers. */
|
2008-06-09 15:52:45 +02:00
|
|
|
void LocalStore::invalidatePath(const Path & path)
|
2003-07-08 11:54:47 +02:00
|
|
|
{
|
2007-08-12 02:29:28 +02:00
|
|
|
debug(format("invalidating path `%1%'") % path);
|
2010-02-22 15:18:55 +01:00
|
|
|
|
2011-07-20 20:10:47 +02:00
|
|
|
drvHashes.erase(path);
|
|
|
|
|
2010-02-19 17:43:25 +01:00
|
|
|
SQLiteStmtUse use(stmtInvalidatePath);
|
2003-07-08 11:54:47 +02:00
|
|
|
|
2010-02-19 17:43:25 +01:00
|
|
|
stmtInvalidatePath.bind(path);
|
2008-06-09 15:52:45 +02:00
|
|
|
|
2010-02-19 17:43:25 +01:00
|
|
|
if (sqlite3_step(stmtInvalidatePath) != SQLITE_DONE)
|
2010-12-05 19:23:19 +01:00
|
|
|
throwSQLiteError(db, format("invalidating path `%1%' in database") % path);
|
2008-06-09 15:52:45 +02:00
|
|
|
|
2010-02-19 17:43:25 +01:00
|
|
|
/* Note that the foreign key constraints on the Refs table take
|
2010-02-22 15:18:55 +01:00
|
|
|
care of deleting the references entries for `path'. */
|
2003-07-08 11:54:47 +02:00
|
|
|
}
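That works because of how the Refs table is declared. A hedged reconstruction of the relevant schema fragment, for illustration only (the real definition lives in schema.sql and may differ in detail):

/* Reconstruction, not copied from schema.sql.  Deleting a ValidPaths
   row cascades to the Refs rows whose `referrer' it is, while `on
   delete restrict' blocks deleting a path that is still referenced. */
static const char * refsSchemaSketch =
    "create table if not exists Refs ("
    "    referrer  integer not null,"
    "    reference integer not null,"
    "    primary key (referrer, reference),"
    "    foreign key (referrer) references ValidPaths(id) on delete cascade,"
    "    foreign key (reference) references ValidPaths(id) on delete restrict"
    ");";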
|
|
|
|
|
|
|
|
|
2008-12-03 19:05:14 +01:00
|
|
|
Path LocalStore::addToStoreFromDump(const string & dump, const string & name,
|
2012-10-03 21:09:18 +02:00
|
|
|
bool recursive, HashType hashAlgo, bool repair)
|
2003-07-07 11:25:26 +02:00
|
|
|
{
|
2008-12-03 19:05:14 +01:00
|
|
|
Hash h = hashString(hashAlgo, dump);
|
2008-12-03 16:51:17 +01:00
|
|
|
|
2008-12-03 19:05:14 +01:00
|
|
|
Path dstPath = makeFixedOutputPath(recursive, hashAlgo, h, name);
|
2003-07-10 17:11:48 +02:00
|
|
|
|
2006-12-01 21:51:18 +01:00
|
|
|
addTempRoot(dstPath);
|
2005-01-31 11:27:25 +01:00
|
|
|
|
2012-10-03 21:09:18 +02:00
|
|
|
if (repair || !isValidPath(dstPath)) {
|
2003-07-10 17:11:48 +02:00
|
|
|
|
2003-10-08 17:06:59 +02:00
|
|
|
/* The first check above is an optimisation to prevent
|
|
|
|
unnecessary lock acquisition. */
|
2003-07-22 17:15:15 +02:00
|
|
|
|
2006-06-01 20:13:33 +02:00
|
|
|
PathLocks outputLock(singleton<PathSet, Path>(dstPath));
|
2003-07-22 17:15:15 +02:00
|
|
|
|
2012-10-03 21:09:18 +02:00
|
|
|
if (repair || !isValidPath(dstPath)) {
|
2004-06-21 09:46:02 +02:00
|
|
|
|
2006-12-09 01:26:24 +01:00
|
|
|
if (pathExists(dstPath)) deletePathWrapped(dstPath);
|
2004-10-25 16:38:23 +02:00
|
|
|
|
2008-12-03 16:51:17 +01:00
|
|
|
if (recursive) {
|
2008-12-03 19:05:14 +01:00
|
|
|
StringSource source(dump);
|
2008-12-03 16:51:17 +01:00
|
|
|
restorePath(dstPath, source);
|
|
|
|
} else
|
2010-01-29 13:22:58 +01:00
|
|
|
writeFile(dstPath, dump);
|
2005-01-14 14:51:38 +01:00
|
|
|
|
2005-01-19 17:39:47 +01:00
|
|
|
canonicalisePathMetaData(dstPath);
|
2008-12-03 16:51:17 +01:00
|
|
|
|
|
|
|
/* Register the SHA-256 hash of the NAR serialisation of
|
|
|
|
the path in the database. We may just have computed it
|
|
|
|
above (if called with recursive == true and hashAlgo ==
|
|
|
|
sha256); otherwise, compute it here. */
|
2010-11-16 18:11:46 +01:00
|
|
|
HashResult hash;
|
|
|
|
if (recursive) {
|
|
|
|
hash.first = hashAlgo == htSHA256 ? h : hashString(htSHA256, dump);
|
|
|
|
hash.second = dump.size();
|
|
|
|
} else
|
|
|
|
hash = hashPath(htSHA256, dstPath);
|
2012-07-23 21:02:52 +02:00
|
|
|
|
|
|
|
optimisePath(dstPath); // FIXME: combine with hashPath()
|
2012-07-30 21:42:18 +02:00
|
|
|
|
2010-11-16 18:11:46 +01:00
|
|
|
ValidPathInfo info;
|
|
|
|
info.path = dstPath;
|
|
|
|
info.hash = hash.first;
|
|
|
|
info.narSize = hash.second;
|
|
|
|
registerValidPath(info);
|
2003-08-01 11:01:51 +02:00
|
|
|
}
|
2003-11-22 19:45:56 +01:00
|
|
|
|
|
|
|
outputLock.setDeletion(true);
|
2003-06-16 15:33:38 +02:00
|
|
|
}
|
2003-08-04 09:09:36 +02:00
|
|
|
|
2003-10-08 17:06:59 +02:00
|
|
|
return dstPath;
|
2003-06-16 15:33:38 +02:00
|
|
|
}
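The check/lock/re-check sequence above (repeated in addTextToStore() and importPath() below) is double-checked locking; a minimal standalone sketch, with std::mutex standing in for PathLocks:

#include <mutex>

static bool valid = false;    // stand-in for isValidPath(dstPath)
static std::mutex pathLock;   // stand-in for PathLocks on dstPath

static void buildPath() { valid = true; }  // stand-in for restorePath() etc.

void ensureValid()
{
    if (valid) return;                       // cheap check: skip locking entirely
    std::lock_guard<std::mutex> g(pathLock); // serialise concurrent creators
    if (valid) return;                       // re-check: another creator won the race
    buildPath();                             // safe: we hold the lock
}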
|
|
|
|
|
|
|
|
|
2008-12-03 19:05:14 +01:00
|
|
|
Path LocalStore::addToStore(const Path & _srcPath,
|
2012-10-03 21:09:18 +02:00
|
|
|
bool recursive, HashType hashAlgo, PathFilter & filter, bool repair)
|
2008-12-03 19:05:14 +01:00
|
|
|
{
|
|
|
|
Path srcPath(absPath(_srcPath));
|
|
|
|
debug(format("adding `%1%' to the store") % srcPath);
|
|
|
|
|
|
|
|
/* Read the whole path into memory. This is not a very scalable
|
|
|
|
method for very large paths, but `copyPath' is mainly used for
|
|
|
|
small files. */
|
|
|
|
StringSink sink;
|
2012-07-07 01:08:20 +02:00
|
|
|
if (recursive)
|
2008-12-03 19:05:14 +01:00
|
|
|
dumpPath(srcPath, sink, filter);
|
|
|
|
else
|
|
|
|
sink.s = readFile(srcPath);
|
|
|
|
|
2012-10-03 21:09:18 +02:00
|
|
|
return addToStoreFromDump(sink.s, baseNameOf(srcPath), recursive, hashAlgo, repair);
|
2008-12-03 19:05:14 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-12-03 16:06:30 +01:00
|
|
|
Path LocalStore::addTextToStore(const string & name, const string & s,
|
2012-10-03 21:09:18 +02:00
|
|
|
const PathSet & references, bool repair)
|
2003-10-15 14:42:39 +02:00
|
|
|
{
|
2008-12-03 16:06:30 +01:00
|
|
|
Path dstPath = computeStorePathForText(name, s, references);
|
2012-07-30 21:42:18 +02:00
|
|
|
|
2006-12-01 21:51:18 +01:00
|
|
|
addTempRoot(dstPath);
|
2005-01-31 11:27:25 +01:00
|
|
|
|
2012-10-03 21:09:18 +02:00
|
|
|
if (repair || !isValidPath(dstPath)) {
|
2003-10-15 14:42:39 +02:00
|
|
|
|
2006-06-01 20:13:33 +02:00
|
|
|
PathLocks outputLock(singleton<PathSet, Path>(dstPath));
|
2003-10-23 12:51:55 +02:00
|
|
|
|
2012-10-03 21:09:18 +02:00
|
|
|
if (repair || !isValidPath(dstPath)) {
|
2004-06-21 09:46:02 +02:00
|
|
|
|
2006-12-09 01:26:24 +01:00
|
|
|
if (pathExists(dstPath)) deletePathWrapped(dstPath);
|
2004-06-21 09:46:02 +02:00
|
|
|
|
2010-01-29 13:22:58 +01:00
|
|
|
writeFile(dstPath, s);
|
2003-10-15 14:42:39 +02:00
|
|
|
|
2005-01-19 17:39:47 +01:00
|
|
|
canonicalisePathMetaData(dstPath);
|
2010-11-16 18:11:46 +01:00
|
|
|
|
|
|
|
HashResult hash = hashPath(htSHA256, dstPath);
|
2012-07-23 21:02:52 +02:00
|
|
|
|
|
|
|
optimisePath(dstPath);
|
2012-07-30 21:42:18 +02:00
|
|
|
|
2010-11-16 18:11:46 +01:00
|
|
|
ValidPathInfo info;
|
|
|
|
info.path = dstPath;
|
|
|
|
info.hash = hash.first;
|
|
|
|
info.narSize = hash.second;
|
|
|
|
info.references = references;
|
|
|
|
registerValidPath(info);
|
2003-10-23 12:51:55 +02:00
|
|
|
}
|
2003-11-22 19:45:56 +01:00
|
|
|
|
|
|
|
outputLock.setDeletion(true);
|
2003-10-15 14:42:39 +02:00
|
|
|
}
|
2005-01-14 14:51:38 +01:00
|
|
|
|
|
|
|
return dstPath;
|
2003-10-15 14:42:39 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2007-02-21 15:31:42 +01:00
|
|
|
struct HashAndWriteSink : Sink
|
|
|
|
{
|
|
|
|
Sink & writeSink;
|
|
|
|
HashSink hashSink;
|
|
|
|
HashAndWriteSink(Sink & writeSink) : writeSink(writeSink), hashSink(htSHA256)
|
|
|
|
{
|
|
|
|
}
|
2011-12-15 17:19:53 +01:00
|
|
|
virtual void operator () (const unsigned char * data, size_t len)
|
2007-02-21 15:31:42 +01:00
|
|
|
{
|
|
|
|
writeSink(data, len);
|
2010-03-09 15:32:03 +01:00
|
|
|
hashSink(data, len);
|
|
|
|
}
|
|
|
|
Hash currentHash()
|
|
|
|
{
|
2011-12-15 17:19:53 +01:00
|
|
|
return hashSink.currentHash().first;
|
2007-02-21 15:31:42 +01:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
#define EXPORT_MAGIC 0x4558494e
|
|
|
|
|
|
|
|
|
2007-02-21 18:51:10 +01:00
|
|
|
static void checkSecrecy(const Path & path)
|
|
|
|
{
|
|
|
|
struct stat st;
|
|
|
|
if (stat(path.c_str(), &st))
|
|
|
|
throw SysError(format("getting status of `%1%'") % path);
|
|
|
|
if ((st.st_mode & (S_IRWXG | S_IRWXO)) != 0)
|
|
|
|
throw Error(format("file `%1%' should be secret (inaccessible to everybody else)!") % path);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2007-02-21 00:17:20 +01:00
|
|
|
void LocalStore::exportPath(const Path & path, bool sign,
|
|
|
|
Sink & sink)
|
|
|
|
{
|
|
|
|
assertStorePath(path);
|
2007-02-21 15:31:42 +01:00
|
|
|
|
2007-02-21 17:23:25 +01:00
|
|
|
addTempRoot(path);
|
2008-03-01 22:05:33 +01:00
|
|
|
if (!isValidPath(path))
|
2007-02-21 17:23:25 +01:00
|
|
|
throw Error(format("path `%1%' is not valid") % path);
|
|
|
|
|
2007-02-21 15:31:42 +01:00
|
|
|
HashAndWriteSink hashAndWriteSink(sink);
|
2012-07-30 21:42:18 +02:00
|
|
|
|
2007-02-21 15:31:42 +01:00
|
|
|
dumpPath(path, hashAndWriteSink);
|
2007-02-21 00:17:20 +01:00
|
|
|
|
2010-03-09 15:32:03 +01:00
|
|
|
/* Refuse to export paths that have changed. This prevents
|
2010-08-24 16:25:33 +02:00
|
|
|
filesystem corruption from spreading to other machines.
|
|
|
|
Don't complain if the stored hash is zero (unknown). */
|
2010-03-09 15:32:03 +01:00
|
|
|
Hash hash = hashAndWriteSink.currentHash();
|
|
|
|
Hash storedHash = queryPathHash(path);
|
2010-08-24 16:25:33 +02:00
|
|
|
if (hash != storedHash && storedHash != Hash(storedHash.type))
|
2010-03-09 15:32:03 +01:00
|
|
|
throw Error(format("hash of path `%1%' has changed from `%2%' to `%3%'!") % path
|
|
|
|
% printHash(storedHash) % printHash(hash));
|
|
|
|
|
2007-02-21 15:31:42 +01:00
|
|
|
writeInt(EXPORT_MAGIC, hashAndWriteSink);
|
|
|
|
|
|
|
|
writeString(path, hashAndWriteSink);
|
2012-07-30 21:42:18 +02:00
|
|
|
|
2007-02-21 00:17:20 +01:00
|
|
|
PathSet references;
|
2008-03-01 22:05:33 +01:00
|
|
|
queryReferences(path, references);
|
2011-12-16 23:31:25 +01:00
|
|
|
writeStrings(references, hashAndWriteSink);
|
2007-02-21 00:17:20 +01:00
|
|
|
|
2008-03-01 22:05:33 +01:00
|
|
|
Path deriver = queryDeriver(path);
|
2007-02-21 15:31:42 +01:00
|
|
|
writeString(deriver, hashAndWriteSink);
|
|
|
|
|
|
|
|
if (sign) {
|
2010-03-09 15:32:03 +01:00
|
|
|
Hash hash = hashAndWriteSink.currentHash();
|
2012-07-30 21:42:18 +02:00
|
|
|
|
2007-02-21 15:31:42 +01:00
|
|
|
writeInt(1, hashAndWriteSink);
|
2012-07-30 21:42:18 +02:00
|
|
|
|
2007-02-21 15:31:42 +01:00
|
|
|
Path tmpDir = createTempDir();
|
|
|
|
AutoDelete delTmp(tmpDir);
|
|
|
|
Path hashFile = tmpDir + "/hash";
|
2010-01-29 13:22:58 +01:00
|
|
|
writeFile(hashFile, printHash(hash));
|
2007-02-21 15:31:42 +01:00
|
|
|
|
2012-07-31 01:55:41 +02:00
|
|
|
Path secretKey = settings.nixConfDir + "/signing-key.sec";
|
2007-02-21 18:51:10 +01:00
|
|
|
checkSecrecy(secretKey);
|
|
|
|
|
2007-02-21 15:31:42 +01:00
|
|
|
Strings args;
|
|
|
|
args.push_back("rsautl");
|
|
|
|
args.push_back("-sign");
|
|
|
|
args.push_back("-inkey");
|
2007-02-21 18:51:10 +01:00
|
|
|
args.push_back(secretKey);
|
2007-02-21 15:31:42 +01:00
|
|
|
args.push_back("-in");
|
|
|
|
args.push_back(hashFile);
|
2007-03-01 14:30:46 +01:00
|
|
|
string signature = runProgram(OPENSSL_PATH, true, args);
|
2007-02-21 15:31:42 +01:00
|
|
|
|
|
|
|
writeString(signature, hashAndWriteSink);
|
2012-07-30 21:42:18 +02:00
|
|
|
|
2007-02-21 15:31:42 +01:00
|
|
|
} else
|
|
|
|
writeInt(0, hashAndWriteSink);
|
2007-02-21 00:17:20 +01:00
|
|
|
}
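Pieced together from the writes above, the export stream has the following layout (an informal sketch; `int' and `string' refer to the writeInt/writeString serialisation used throughout):

/* Informal layout of the stream produced by exportPath():
 *
 *   NAR archive of the path            (dumpPath)
 *   int     EXPORT_MAGIC (0x4558494e)
 *   string  store path
 *   strings references
 *   string  deriver ("" if none)
 *   int     1 if signed, else 0
 *   string  signature                  (only if signed)
 *
 * Everything up to and including the deriver is covered by the hash
 * that gets signed; the trailer is not, which is why importPath()
 * below stops hashing before reading it. */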
|
|
|
|
|
|
|
|
|
2007-02-21 16:45:32 +01:00
|
|
|
struct HashAndReadSource : Source
|
|
|
|
{
|
|
|
|
Source & readSource;
|
|
|
|
HashSink hashSink;
|
|
|
|
bool hashing;
|
|
|
|
HashAndReadSource(Source & readSource) : readSource(readSource), hashSink(htSHA256)
|
|
|
|
{
|
|
|
|
hashing = true;
|
|
|
|
}
|
2011-12-16 20:44:13 +01:00
|
|
|
size_t read(unsigned char * data, size_t len)
|
2007-02-21 16:45:32 +01:00
|
|
|
{
|
2011-12-16 20:44:13 +01:00
|
|
|
size_t n = readSource.read(data, len);
|
|
|
|
if (hashing) hashSink(data, n);
|
|
|
|
return n;
|
2007-02-21 16:45:32 +01:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
|
2010-06-21 13:08:09 +02:00
|
|
|
/* Create a temporary directory in the store that won't be
|
|
|
|
garbage-collected. */
|
|
|
|
Path LocalStore::createTempDirInStore()
|
|
|
|
{
|
|
|
|
Path tmpDir;
|
|
|
|
do {
|
|
|
|
/* There is a slight possibility that `tmpDir' gets deleted by
|
|
|
|
the GC between createTempDir() and addTempRoot(), so repeat
|
|
|
|
until `tmpDir' exists. */
|
2012-07-31 01:55:41 +02:00
|
|
|
tmpDir = createTempDir(settings.nixStore);
|
2010-06-21 13:08:09 +02:00
|
|
|
addTempRoot(tmpDir);
|
|
|
|
} while (!pathExists(tmpDir));
|
|
|
|
return tmpDir;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2007-02-21 16:45:32 +01:00
|
|
|
Path LocalStore::importPath(bool requireSignature, Source & source)
|
|
|
|
{
|
|
|
|
HashAndReadSource hashAndReadSource(source);
|
2012-07-30 21:42:18 +02:00
|
|
|
|
2007-02-21 16:45:32 +01:00
|
|
|
/* We don't yet know what store path this archive contains (the
|
|
|
|
store path follows the archive data proper), and besides, we
|
|
|
|
don't know yet whether the signature is valid. */
|
2010-06-21 13:08:09 +02:00
|
|
|
Path tmpDir = createTempDirInStore();
|
|
|
|
AutoDelete delTmp(tmpDir);
|
2007-02-21 16:45:32 +01:00
|
|
|
Path unpacked = tmpDir + "/unpacked";
|
|
|
|
|
|
|
|
restorePath(unpacked, hashAndReadSource);
|
|
|
|
|
|
|
|
unsigned int magic = readInt(hashAndReadSource);
|
|
|
|
if (magic != EXPORT_MAGIC)
|
|
|
|
throw Error("Nix archive cannot be imported; wrong format");
|
|
|
|
|
|
|
|
Path dstPath = readStorePath(hashAndReadSource);
|
|
|
|
|
2012-03-05 18:13:44 +01:00
|
|
|
printMsg(lvlInfo, format("importing path `%1%'") % dstPath);
|
|
|
|
|
2011-12-16 23:31:25 +01:00
|
|
|
PathSet references = readStorePaths<PathSet>(hashAndReadSource);
|
2007-02-21 16:45:32 +01:00
|
|
|
|
2007-02-28 00:18:57 +01:00
|
|
|
Path deriver = readString(hashAndReadSource);
|
|
|
|
if (deriver != "") assertStorePath(deriver);
|
2007-02-21 16:45:32 +01:00
|
|
|
|
2010-11-16 18:11:46 +01:00
|
|
|
Hash hash = hashAndReadSource.hashSink.finish().first;
|
2007-02-21 16:45:32 +01:00
|
|
|
hashAndReadSource.hashing = false;
|
|
|
|
|
|
|
|
bool haveSignature = readInt(hashAndReadSource) == 1;
|
|
|
|
|
|
|
|
if (requireSignature && !haveSignature)
|
2011-11-23 16:13:37 +01:00
|
|
|
throw Error(format("imported archive of `%1%' lacks a signature") % dstPath);
|
2012-07-30 21:42:18 +02:00
|
|
|
|
2007-02-21 16:45:32 +01:00
|
|
|
if (haveSignature) {
|
|
|
|
string signature = readString(hashAndReadSource);
|
|
|
|
|
2007-03-01 13:30:24 +01:00
|
|
|
if (requireSignature) {
|
|
|
|
Path sigFile = tmpDir + "/sig";
|
2010-01-29 13:22:58 +01:00
|
|
|
writeFile(sigFile, signature);
|
2007-03-01 13:30:24 +01:00
|
|
|
|
|
|
|
Strings args;
|
|
|
|
args.push_back("rsautl");
|
|
|
|
args.push_back("-verify");
|
|
|
|
args.push_back("-inkey");
|
2012-07-31 01:55:41 +02:00
|
|
|
args.push_back(settings.nixConfDir + "/signing-key.pub");
|
2007-03-01 13:30:24 +01:00
|
|
|
args.push_back("-pubin");
|
|
|
|
args.push_back("-in");
|
|
|
|
args.push_back(sigFile);
|
2007-03-01 14:30:46 +01:00
|
|
|
string hash2 = runProgram(OPENSSL_PATH, true, args);
|
2007-03-01 13:30:24 +01:00
|
|
|
|
|
|
|
/* Note: runProgram() throws an exception if the signature
|
|
|
|
is invalid. */
|
|
|
|
|
|
|
|
if (printHash(hash) != hash2)
|
|
|
|
throw Error(
|
|
|
|
"signed hash doesn't match actual contents of imported "
|
|
|
|
"archive; archive could be corrupt, or someone is trying "
|
|
|
|
"to import a Trojan horse");
|
|
|
|
}
|
2007-02-21 16:45:32 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Do the actual import. */
|
|
|
|
|
|
|
|
/* !!! way too much code duplication with addTextToStore() etc. */
|
|
|
|
addTempRoot(dstPath);
|
|
|
|
|
|
|
|
if (!isValidPath(dstPath)) {
|
|
|
|
|
2009-02-02 18:24:10 +01:00
|
|
|
PathLocks outputLock;
|
|
|
|
|
|
|
|
/* Lock the output path. But don't lock if we're being called
|
|
|
|
from a build hook (whose parent process already acquired a
|
|
|
|
lock on this path). */
|
2012-09-19 21:43:23 +02:00
|
|
|
Strings locksHeld = tokenizeString<Strings>(getEnv("NIX_HELD_LOCKS"));
|
2009-02-02 18:24:10 +01:00
|
|
|
if (find(locksHeld.begin(), locksHeld.end(), dstPath) == locksHeld.end())
|
|
|
|
outputLock.lockPaths(singleton<PathSet, Path>(dstPath));
|
2007-02-21 16:45:32 +01:00
|
|
|
|
|
|
|
if (!isValidPath(dstPath)) {
|
|
|
|
|
|
|
|
if (pathExists(dstPath)) deletePathWrapped(dstPath);
|
|
|
|
|
|
|
|
if (rename(unpacked.c_str(), dstPath.c_str()) == -1)
|
|
|
|
throw SysError(format("cannot move `%1%' to `%2%'")
|
|
|
|
% unpacked % dstPath);
|
|
|
|
|
|
|
|
canonicalisePathMetaData(dstPath);
|
2012-07-30 21:42:18 +02:00
|
|
|
|
2007-02-21 16:45:32 +01:00
|
|
|
/* !!! if we were clever, we could prevent the hashPath()
|
|
|
|
here. */
|
2010-11-16 18:11:46 +01:00
|
|
|
HashResult hash = hashPath(htSHA256, dstPath);
|
2012-07-23 21:02:52 +02:00
|
|
|
|
|
|
|
optimisePath(dstPath); // FIXME: combine with hashPath()
|
2012-07-30 21:42:18 +02:00
|
|
|
|
2010-11-16 18:11:46 +01:00
|
|
|
ValidPathInfo info;
|
|
|
|
info.path = dstPath;
|
|
|
|
info.hash = hash.first;
|
|
|
|
info.narSize = hash.second;
|
|
|
|
info.references = references;
|
|
|
|
info.deriver = deriver != "" && isValidPath(deriver) ? deriver : "";
|
|
|
|
registerValidPath(info);
|
2007-02-21 16:45:32 +01:00
|
|
|
}
|
2012-07-30 21:42:18 +02:00
|
|
|
|
2007-02-21 16:45:32 +01:00
|
|
|
outputLock.setDeletion(true);
|
|
|
|
}
|
2012-07-30 21:42:18 +02:00
|
|
|
|
2007-02-21 16:45:32 +01:00
|
|
|
return dstPath;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-12-16 23:31:25 +01:00
|
|
|
Paths LocalStore::importPaths(bool requireSignature, Source & source)
|
|
|
|
{
|
|
|
|
Paths res;
|
|
|
|
while (true) {
|
|
|
|
unsigned long long n = readLongLong(source);
|
|
|
|
if (n == 0) break;
|
|
|
|
if (n != 1) throw Error("input doesn't look like something created by `nix-store --export'");
|
|
|
|
res.push_back(importPath(requireSignature, source));
|
|
|
|
}
|
|
|
|
return res;
|
|
|
|
}
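The reader above fixes the framing: a 1 precedes every exported path and a 0 terminates the stream. A reconstruction of the matching writer in this codebase's idiom (exportPathsSketch is written here for illustration, not necessarily the actual helper):

void exportPathsSketch(StoreAPI & store, const Paths & paths, bool sign, Sink & sink)
{
    foreach (Paths::const_iterator, i, paths) {
        writeInt(1, sink);              /* "another path follows" */
        store.exportPath(*i, sign, sink);
    }
    writeInt(0, sink);                  /* end-of-stream marker */
}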
|
|
|
|
|
|
|
|
|
2012-03-26 20:43:33 +02:00
|
|
|
void LocalStore::invalidatePathChecked(const Path & path)
|
2003-06-23 16:40:49 +02:00
|
|
|
{
|
2004-02-14 22:44:18 +01:00
|
|
|
assertStorePath(path);
|
2003-07-08 11:54:47 +02:00
|
|
|
|
2010-12-05 19:23:19 +01:00
|
|
|
while (1) {
|
|
|
|
try {
|
|
|
|
SQLiteTxn txn(db);
|
|
|
|
|
|
|
|
if (isValidPath(path)) {
|
|
|
|
PathSet referrers; queryReferrers(path, referrers);
|
|
|
|
referrers.erase(path); /* ignore self-references */
|
|
|
|
if (!referrers.empty())
|
2011-12-05 22:04:20 +01:00
|
|
|
throw PathInUse(format("cannot delete path `%1%' because it is in use by %2%")
|
2010-12-05 19:23:19 +01:00
|
|
|
% path % showPaths(referrers));
|
|
|
|
invalidatePath(path);
|
|
|
|
}
|
|
|
|
|
|
|
|
txn.commit();
|
|
|
|
break;
|
|
|
|
} catch (SQLiteBusy & e) { }
|
2005-01-31 15:00:43 +01:00
|
|
|
}
|
2003-06-23 16:40:49 +02:00
|
|
|
}
|
2003-07-17 14:27:55 +02:00
|
|
|
|
|
|
|
|
2012-10-02 21:04:59 +02:00
|
|
|
bool LocalStore::verifyStore(bool checkContents, bool repair)
|
2003-07-17 14:27:55 +02:00
|
|
|
{
|
2010-08-31 13:47:31 +02:00
|
|
|
printMsg(lvlError, format("reading the Nix store..."));
|
2007-01-14 18:28:30 +01:00
|
|
|
|
2012-10-02 21:04:59 +02:00
|
|
|
bool errors = false;
|
|
|
|
|
2010-08-31 13:47:31 +02:00
|
|
|
/* Acquire the global GC lock to prevent a garbage collection. */
|
|
|
|
AutoCloseFD fdGCLock = openGCLock(ltWrite);
|
2012-07-30 21:42:18 +02:00
|
|
|
|
2012-07-31 01:55:41 +02:00
|
|
|
Paths entries = readDirectory(settings.nixStore);
|
2010-08-31 13:47:31 +02:00
|
|
|
PathSet store(entries.begin(), entries.end());
|
|
|
|
|
|
|
|
/* Check whether all valid paths actually exist. */
|
|
|
|
printMsg(lvlInfo, "checking path existence...");
|
|
|
|
|
2012-07-11 16:49:04 +02:00
|
|
|
PathSet validPaths2 = queryAllValidPaths(), validPaths, done;
|
2010-08-31 13:47:31 +02:00
|
|
|
|
|
|
|
foreach (PathSet::iterator, i, validPaths2)
|
2012-10-02 21:04:59 +02:00
|
|
|
verifyPath(*i, store, done, validPaths, repair, errors);
|
2003-07-17 14:27:55 +02:00
|
|
|
|
2010-12-06 16:29:38 +01:00
|
|
|
/* Release the GC lock so that checking content hashes (which can
|
|
|
|
take ages) doesn't block the GC or builds. */
|
|
|
|
fdGCLock.close();
|
|
|
|
|
2010-02-18 17:51:27 +01:00
|
|
|
/* Optionally, check the content hashes (slow). */
|
|
|
|
if (checkContents) {
|
2010-08-31 13:47:31 +02:00
|
|
|
printMsg(lvlInfo, "checking hashes...");
|
2007-01-14 18:28:30 +01:00
|
|
|
|
2011-12-02 18:52:18 +01:00
|
|
|
Hash nullHash(htSHA256);
|
|
|
|
|
2010-02-18 17:51:27 +01:00
|
|
|
foreach (PathSet::iterator, i, validPaths) {
|
2010-12-06 16:29:38 +01:00
|
|
|
try {
|
|
|
|
ValidPathInfo info = queryPathInfo(*i);
|
|
|
|
|
|
|
|
/* Check the content hash (optionally - slow). */
|
|
|
|
printMsg(lvlTalkative, format("checking contents of `%1%'") % *i);
|
|
|
|
HashResult current = hashPath(info.hash.type, *i);
|
2012-07-30 21:42:18 +02:00
|
|
|
|
2011-12-02 18:52:18 +01:00
|
|
|
if (info.hash != nullHash && info.hash != current.first) {
|
2010-12-06 16:29:38 +01:00
|
|
|
printMsg(lvlError, format("path `%1%' was modified! "
|
|
|
|
"expected hash `%2%', got `%3%'")
|
|
|
|
% *i % printHash(info.hash) % printHash(current.first));
|
2012-10-02 21:04:59 +02:00
|
|
|
if (repair) repairPath(*i); else errors = true;
|
2010-12-06 16:29:38 +01:00
|
|
|
} else {
|
2011-12-02 18:52:18 +01:00
|
|
|
|
|
|
|
bool update = false;
|
|
|
|
|
|
|
|
/* Fill in missing hashes. */
|
|
|
|
if (info.hash == nullHash) {
|
|
|
|
printMsg(lvlError, format("fixing missing hash on `%1%'") % *i);
|
|
|
|
info.hash = current.first;
|
|
|
|
update = true;
|
|
|
|
}
|
2012-07-30 21:42:18 +02:00
|
|
|
|
2010-12-06 16:29:38 +01:00
|
|
|
/* Fill in missing narSize fields (from old stores). */
|
|
|
|
if (info.narSize == 0) {
|
|
|
|
printMsg(lvlError, format("updating size field on `%1%' to %2%") % *i % current.second);
|
|
|
|
info.narSize = current.second;
|
2012-07-30 21:42:18 +02:00
|
|
|
update = true;
|
2010-12-06 16:29:38 +01:00
|
|
|
}
|
2011-12-02 18:52:18 +01:00
|
|
|
|
|
|
|
if (update) updatePathInfo(info);
|
|
|
|
|
2010-12-06 16:29:38 +01:00
|
|
|
}
|
2012-07-30 21:42:18 +02:00
|
|
|
|
2010-12-06 16:29:38 +01:00
|
|
|
} catch (Error & e) {
|
|
|
|
/* It's possible that the path got GC'ed, so ignore
|
|
|
|
errors on invalid paths. */
|
|
|
|
if (isValidPath(*i)) throw;
|
|
|
|
printMsg(lvlError, format("warning: %1%") % e.msg());
|
2012-10-02 21:04:59 +02:00
|
|
|
errors = true;
|
2010-12-06 16:29:38 +01:00
|
|
|
}
|
2005-02-08 14:23:55 +01:00
|
|
|
}
|
2007-08-13 13:37:39 +02:00
|
|
|
}
|
2012-10-02 21:04:59 +02:00
|
|
|
|
|
|
|
return errors;
|
2010-02-18 16:11:08 +01:00
|
|
|
}
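For context, verifyStore() is reached from the command line; to the best of my knowledge the flags of this era map onto its parameters as follows (a hedged note, not a guarantee):

/* Illustrative invocation (flag names as of this era, hedged):

   $ nix-store --verify --check-contents --repair

   --check-contents => checkContents = true (slow content-hash check)
   --repair         => repair = true (try to re-substitute bad paths)

   A return value of true means unrepaired errors remain. */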
|
|
|
|
|
|
|
|
|
2010-08-31 13:47:31 +02:00
|
|
|
void LocalStore::verifyPath(const Path & path, const PathSet & store,
|
2012-10-02 21:04:59 +02:00
|
|
|
PathSet & done, PathSet & validPaths, bool repair, bool & errors)
|
2010-08-31 13:47:31 +02:00
|
|
|
{
|
|
|
|
checkInterrupt();
|
2012-07-30 21:42:18 +02:00
|
|
|
|
2010-08-31 13:47:31 +02:00
|
|
|
if (done.find(path) != done.end()) return;
|
|
|
|
done.insert(path);
|
|
|
|
|
|
|
|
if (!isStorePath(path)) {
|
|
|
|
printMsg(lvlError, format("path `%1%' is not in the Nix store") % path);
|
|
|
|
invalidatePath(path);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (store.find(baseNameOf(path)) == store.end()) {
|
|
|
|
/* Check any referrers first. If we can invalidate them
|
|
|
|
first, then we can invalidate this path as well. */
|
|
|
|
bool canInvalidate = true;
|
|
|
|
PathSet referrers; queryReferrers(path, referrers);
|
|
|
|
foreach (PathSet::iterator, i, referrers)
|
|
|
|
if (*i != path) {
|
2012-10-02 21:04:59 +02:00
|
|
|
verifyPath(*i, store, done, validPaths, repair, errors);
|
2010-08-31 13:47:31 +02:00
|
|
|
if (validPaths.find(*i) != validPaths.end())
|
|
|
|
canInvalidate = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (canInvalidate) {
|
|
|
|
printMsg(lvlError, format("path `%1%' disappeared, removing from database...") % path);
|
|
|
|
invalidatePath(path);
|
2012-10-02 21:04:59 +02:00
|
|
|
} else {
|
2010-08-31 13:47:31 +02:00
|
|
|
printMsg(lvlError, format("path `%1%' disappeared, but it still has valid referrers!") % path);
|
2012-10-02 21:04:59 +02:00
|
|
|
if (repair)
|
|
|
|
try {
|
|
|
|
repairPath(path);
|
|
|
|
} catch (Error & e) {
|
|
|
|
printMsg(lvlError, format("warning: %1%") % e.msg());
|
|
|
|
errors = true;
|
|
|
|
}
|
|
|
|
else errors = true;
|
|
|
|
}
|
2012-07-30 21:42:18 +02:00
|
|
|
|
2010-08-31 13:47:31 +02:00
|
|
|
return;
|
|
|
|
}
|
2012-07-30 21:42:18 +02:00
|
|
|
|
2010-08-31 13:47:31 +02:00
|
|
|
validPaths.insert(path);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-10-02 23:13:46 +02:00
|
|
|
bool LocalStore::pathContentsGood(const Path & path)
|
|
|
|
{
|
2012-10-03 16:38:09 +02:00
|
|
|
std::map<Path, bool>::iterator i = pathContentsGoodCache.find(path);
|
|
|
|
if (i != pathContentsGoodCache.end()) return i->second;
|
|
|
|
printMsg(lvlInfo, format("checking path `%1%'...") % path);
|
2012-10-02 23:13:46 +02:00
|
|
|
ValidPathInfo info = queryPathInfo(path);
|
2012-10-03 16:38:09 +02:00
|
|
|
bool res;
|
|
|
|
if (!pathExists(path))
|
|
|
|
res = false;
|
|
|
|
else {
|
|
|
|
HashResult current = hashPath(info.hash.type, path);
|
|
|
|
Hash nullHash(htSHA256);
|
|
|
|
res = info.hash == nullHash || info.hash == current.first;
|
|
|
|
}
|
|
|
|
pathContentsGoodCache[path] = res;
|
|
|
|
if (!res) printMsg(lvlError, format("path `%1%' is corrupted or missing!") % path);
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void LocalStore::markContentsGood(const Path & path)
|
|
|
|
{
|
|
|
|
pathContentsGoodCache[path] = true;
|
2012-10-02 23:13:46 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-02-18 16:11:08 +01:00
|
|
|
/* Functions for upgrading from the pre-SQLite database. */
|
|
|
|
|
|
|
|
PathSet LocalStore::queryValidPathsOld()
|
|
|
|
{
|
|
|
|
PathSet paths;
|
2012-07-31 01:55:41 +02:00
|
|
|
Strings entries = readDirectory(settings.nixDBPath + "/info");
|
2010-02-18 16:11:08 +01:00
|
|
|
foreach (Strings::iterator, i, entries)
|
2012-07-31 01:55:41 +02:00
|
|
|
if (i->at(0) != '.') paths.insert(settings.nixStore + "/" + *i);
|
2010-02-18 16:11:08 +01:00
|
|
|
return paths;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
ValidPathInfo LocalStore::queryPathInfoOld(const Path & path)
|
|
|
|
{
|
|
|
|
ValidPathInfo res;
|
|
|
|
res.path = path;
|
|
|
|
|
|
|
|
/* Read the info file. */
|
2010-02-19 17:04:51 +01:00
|
|
|
string baseName = baseNameOf(path);
|
2012-07-31 01:55:41 +02:00
|
|
|
Path infoFile = (format("%1%/info/%2%") % settings.nixDBPath % baseName).str();
|
2010-02-18 16:11:08 +01:00
|
|
|
if (!pathExists(infoFile))
|
|
|
|
throw Error(format("path `%1%' is not valid") % path);
|
|
|
|
string info = readFile(infoFile);
|
|
|
|
|
|
|
|
/* Parse it. */
|
2012-09-19 21:43:23 +02:00
|
|
|
Strings lines = tokenizeString<Strings>(info, "\n");
|
2010-02-18 16:11:08 +01:00
|
|
|
|
|
|
|
foreach (Strings::iterator, i, lines) {
|
|
|
|
string::size_type p = i->find(':');
|
|
|
|
if (p == string::npos)
|
|
|
|
throw Error(format("corrupt line in `%1%': %2%") % infoFile % *i);
|
|
|
|
string name(*i, 0, p);
|
|
|
|
string value(*i, p + 2);
|
|
|
|
if (name == "References") {
|
2012-09-19 21:43:23 +02:00
|
|
|
Strings refs = tokenizeString<Strings>(value, " ");
|
2010-02-18 16:11:08 +01:00
|
|
|
res.references = PathSet(refs.begin(), refs.end());
|
|
|
|
} else if (name == "Deriver") {
|
|
|
|
res.deriver = value;
|
|
|
|
} else if (name == "Hash") {
|
|
|
|
res.hash = parseHashField(path, value);
|
|
|
|
} else if (name == "Registered-At") {
|
|
|
|
int n = 0;
|
|
|
|
string2Int(value, n);
|
|
|
|
res.registrationTime = n;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return res;
|
2007-08-13 13:37:39 +02:00
|
|
|
}
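For reference, an info file in this old format would look roughly like the following (a reconstruction from the parser above; hashes and store paths are abbreviated placeholders):

/* Illustrative contents of <nixDBPath>/info/<basename> (placeholder
   values, reconstructed from the parser above):

   Hash: sha256:1abc...
   References: /nix/store/...-glibc-2.13 /nix/store/...-gcc-4.6
   Deriver: /nix/store/...-hello-2.8.drv
   Registered-At: 1350000000

   Each line is `Name: value'; the value is read starting two
   characters past the colon, so the single space after it matters. */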
|
|
|
|
|
|
|
|
|
2010-02-18 14:16:59 +01:00
|
|
|
/* Upgrade from schema 5 (Nix 0.12) to schema 6 (Nix >= 0.15). */
|
|
|
|
void LocalStore::upgradeStore6()
|
2009-11-06 02:15:44 +01:00
|
|
|
{
|
2010-02-18 14:16:59 +01:00
|
|
|
printMsg(lvlError, "upgrading Nix store to new schema (this may take a while)...");
|
|
|
|
|
2010-02-24 17:30:20 +01:00
|
|
|
openDB(true);
|
2010-02-18 14:16:59 +01:00
|
|
|
|
2010-02-18 16:11:08 +01:00
|
|
|
PathSet validPaths = queryValidPathsOld();
|
2010-02-18 14:16:59 +01:00
|
|
|
|
2010-02-18 15:40:07 +01:00
|
|
|
SQLiteTxn txn(db);
|
2012-07-30 21:42:18 +02:00
|
|
|
|
2010-02-18 14:16:59 +01:00
|
|
|
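/* First pass: insert every valid path, so that each one has a row ID
   before any references between them are added. */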
foreach (PathSet::iterator, i, validPaths) {
|
2011-09-12 11:07:43 +02:00
|
|
|
addValidPath(queryPathInfoOld(*i), false);
|
2010-02-18 14:40:46 +01:00
|
|
|
std::cerr << ".";
|
|
|
|
}
|
|
|
|
|
|
|
|
std::cerr << "|";
|
2012-07-30 21:42:18 +02:00
|
|
|
|
2010-02-18 14:40:46 +01:00
|
|
|
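/* Second pass: now that all paths have IDs, record the references. */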
foreach (PathSet::iterator, i, validPaths) {
|
2010-02-18 16:11:08 +01:00
|
|
|
ValidPathInfo info = queryPathInfoOld(*i);
|
2010-02-24 16:07:23 +01:00
|
|
|
unsigned long long referrer = queryValidPathId(*i);
|
|
|
|
foreach (PathSet::iterator, j, info.references)
|
|
|
|
addReference(referrer, queryValidPathId(*j));
|
2010-02-18 14:16:59 +01:00
|
|
|
std::cerr << ".";
|
|
|
|
}
|
|
|
|
|
|
|
|
std::cerr << "\n";
|
|
|
|
|
2010-02-18 15:40:07 +01:00
|
|
|
txn.commit();
|
2009-11-06 02:15:44 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-09-13 20:33:41 +02:00
|
|
|
void LocalStore::vacuumDB()
|
|
|
|
{
|
|
|
|
if (sqlite3_exec(db, "vacuum;", 0, 0, 0) != SQLITE_OK)
|
|
|
|
throwSQLiteError(db, "vacuuming SQLite database");
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2006-09-04 23:06:23 +02:00
|
|
|
}
|