Mirror of https://gitlab.alpinelinux.org/alpine/aports.git (synced 2025-08-28 18:01:21 +02:00)
- instead of basing the version on the unreleased 2.0_rc1, base it on the latest release, 1.34
- use the date of the latest git commit instead of the date the snapshot was generated
- instead of making a git snapshot, make a patch; this way we don't need to upload the source archive anywhere
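For reference, a patch like this can be regenerated from a checkout of the upstream csync2 repository; the sketch below is only an illustration, and the tag name and output file name are assumptions, not taken from the aport:

    # assumes upstream tags the 1.34 release as "csync2-1.34"; adjust to the actual tag
    $ git diff csync2-1.34..HEAD > csync2-git.patch

The resulting patch can then be applied on top of the 1.34 release sources at build time instead of shipping a separate git snapshot tarball.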
6641 lines
199 KiB
diff --git a/AUTHORS b/AUTHORS
index 74faa05..45ed0d7 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -1 +1,8 @@
+LINBIT Information Technologies GmbH <http://www.linbit.com>
Clifford Wolf <clifford@clifford.at>
+
+With contributions from:
+
+Lars Ellenberg <lars.ellenberg@linbit.at>
+Johannes Thoma <johannes.thoma@gmx.at>
+Dennis Schafroth <dennis@schafroth.dk>
diff --git a/ChangeLog b/ChangeLog
index 52fa2a2..9d9f791 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,2 +1,2 @@
Please fetch the ChangeLog directly from the subversion repository:
-svn log -v http://svn.clifford.at/csync2/
+svn log -v http://svn.linbit.com/csync2/
diff --git a/Makefile.am b/Makefile.am
index e3ec933..adbf68d 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -23,16 +23,27 @@ man_MANS = csync2.1
|
|
csync2_SOURCES = action.c cfgfile_parser.y cfgfile_scanner.l check.c \
checktxt.c csync2.c daemon.c db.c error.c getrealfn.c \
- groups.c rsync.c update.c urlencode.c conn.c prefixsubst.c
+ groups.c rsync.c update.c urlencode.c conn.c prefixsubst.c \
+ db_api.c db_sqlite.c db_sqlite2.c db_mysql.c db_postgres.c \
+ csync2.h db_api.h db_mysql.h db_postgres.h db_sqlite.h db_sqlite2.h dl.h \
+ csync2-compare \
+ csync2.1
+
+EXTRA_DIST = csync2.cfg csync2.xinetd
|
|
AM_YFLAGS = -d
BUILT_SOURCES = cfgfile_parser.h
+LIBS += -ldl
CLEANFILES = cfgfile_parser.c cfgfile_parser.h cfgfile_scanner.c \
- private_librsync private_libsqlite config.log \
- config.status config.h .deps/*.Po stamp-h1 Makefile
+ private_librsync private_libsqlite config.log
+
+DISTCLEANFILES = config.status config.h .deps/*.Po stamp-h1 Makefile Makefile.in configure
+
+dist-clean-local:
+ rm -rf autom4te.cache
|
|
-AM_CFLAGS=
-AM_LDFLAGS=
+AM_CFLAGS=$(LIBGNUTLS_CFLAGS)
+AM_LDFLAGS=$(LIBGNUTLS_LIBS)
|
|
if PRIVATE_LIBRSYNC
BUILT_SOURCES += private_librsync
@@ -41,13 +52,6 @@ if PRIVATE_LIBRSYNC
LIBS += -lprivatersync
endif
|
|
-if PRIVATE_LIBSQLITE
- BUILT_SOURCES += private_libsqlite
- AM_CFLAGS += -I$(shell test -f libsqlite.dir && cat libsqlite.dir || echo ==libsqlite==)
- AM_LDFLAGS += -L$(shell test -f libsqlite.dir && cat libsqlite.dir || echo ==libsqlite==)
- LIBS += -lprivatesqlite
-endif
-
AM_CPPFLAGS = -D'DBDIR="$(localstatedir)/lib/csync2"'
AM_CPPFLAGS += -D'ETCDIR="$(sysconfdir)"'
|
|
diff --git a/README b/README
index ed6eb6b..7dbbae1 100644
--- a/README
+++ b/README
@@ -12,7 +12,7 @@ better have a look at Unison (http://www.cis.upenn.edu/~bcpierce/unison/)
too.
|
|
See http://oss.linbit.com/ for more information on csync2. The csync2
-subversion tree can be found at http://svn.clifford.at/csync2/.
+subversion tree can be found at http://svn.linbit.com/csync2/.
|
|
|
|
Copyright
@@ -76,3 +76,25 @@ There is a csync2 mailing list:
It is recommended to subscribe to this list if you are using csync2 in
production environments.
|
|
+Building csync2
+===============
+
+You'll need the GNU autotools and a compiler toolchain (gcc) for
+building csync2.
+
+First, run the autogen.sh script:
+
+karin$ ./autogen.sh
+
+Then run configure, use ./configure --help for more options:
+
+karin$ ./configure
+
+Then run make:
+
+karin$ make
+
+csync2 should be built now. Direct any questions to the csync2 mailing list
+(see above).
+
+- Johannes
diff --git a/TODO b/TODO
|
|
index 6c02fc3..0ee83ff 100644
|
|
--- a/TODO
|
|
+++ b/TODO
|
|
@@ -1 +1,74 @@
|
|
Universal peace and a good attitude for everyone.
|
|
+
|
|
+Check for mysql/mysql.h to exist in configure.
|
|
+ Done
|
|
+
|
|
+DB abstraction: check for installed databases on configure
|
|
+ and enable/disable them for compilation.
|
|
+
|
|
+Create MySQL database if it doesn't exist.
|
|
+
|
|
+Implement table creation with schema support.
|
|
+ We don't have a schema table yet, add it when it is needed.
|
|
+
|
|
+Have check return value for asprintf () .. have a macro that does a csync_fatal
|
|
+ if there is no memory.
|
|
+
|
|
+Make database configurable.
|
|
+
|
|
+From Dennis:
|
|
+Filename column is too short, but this is due to the fact that mysql 5 only
|
|
+supports keys length of max 1000 bytes.
|
|
+So the filename+peername must be below 333 UTF characters (since mysql looks at
|
|
+worst-case when generating the tables).
|
|
+ Sort of fixed. Fields are 4096 bytes now (highest MAXPATHLEN of all
|
|
+ supported platforms) but only the first 1000 chars are unique.
|
|
+
|
|
+sqlite3:// url not working
|
|
+ It works but it needs an extra slash like in sqlite3:///var/lib/...
|
|
+ Now have a howto if slash is missing and database file is not found.
|
|
+
|
|
+-a should be stronger than configured database in /etc/csync2.cfg
|
|
+ Works now.
|
|
+
|
|
+test long filenames with mysql
|
|
+ Work now
|
|
+
|
|
+From Dennis:
|
|
+Weird characters in filename cuts off the filename at the character. I have a
|
|
+danish letter (å encoded in iso-8859-1: \370) still present in my
|
|
+now UTF-8 filesystem names.
|
|
+ Couldn't reproduce tried with German umlauts.
|
|
+
|
|
+---------------------------------------------------------------------------
|
|
+
|
|
+Test schema support for SQLite 2.
|
|
+
|
|
+Have command to pipe connection through (for SSH support for example)
|
|
+
|
|
+From Gerhard Rieger:
|
|
+If there are more than one node to sync with print nodes that are not reachable.
|
|
+ Done, test it
|
|
+
|
|
+ Segfault when syncing a file where one side is a directory and the other one
|
|
+ is a link.
|
|
+
|
|
+postgres support
|
|
+
|
|
+dl_open for all sql related calls
|
|
+ we don't want to depend on libmysql/libsqlite/whatever on install.
|
|
+ TODO: how to express that we need at least one sql client library in Debian/RPM
|
|
+
|
|
+Performance tests: when does it make sense to use mysql instead of sqlite?
|
|
+
|
|
+Have schema version table.
|
|
+
|
|
+Compile even when there is no libsqlite (mysql support only)
|
|
+
|
|
+From Martin: Provide up-to-date packages.
|
|
+ Resuse build.sh script from drbd-proxy.
|
|
+
|
|
+Build packages for all supported distros.
|
|
+
|
|
+If include <dir> is missing error message is Permission denied, which is
|
|
+ irritating.
|
|
diff --git a/action.c b/action.c
|
|
index 438db5c..9ac8126 100644
|
|
--- a/action.c
|
|
+++ b/action.c
|
|
@@ -38,14 +38,18 @@ void csync_schedule_commands(const char *filename, int islocal)
|
|
|
|
while ( (g=csync_find_next(g, filename)) ) {
|
|
for (a=g->action; a; a=a->next) {
|
|
+ if ( !islocal && a->do_local_only )
|
|
+ continue;
|
|
if ( islocal && !a->do_local )
|
|
continue;
|
|
if (!a->pattern)
|
|
goto found_matching_pattern;
|
|
- for (p=a->pattern; p; p=p->next)
|
|
+ for (p=a->pattern; p; p=p->next) {
|
|
+ int fnm_pathname = p->star_matches_slashes ? 0 : FNM_PATHNAME;
|
|
if ( !fnmatch(p->pattern, filename,
|
|
- FNM_LEADING_DIR|FNM_PATHNAME) )
|
|
+ FNM_LEADING_DIR|fnm_pathname) )
|
|
goto found_matching_pattern;
|
|
+ }
|
|
continue;
|
|
found_matching_pattern:
|
|
for (c=a->command; c; c=c->next)
|
|
@@ -69,7 +73,7 @@ void csync_run_single_command(const char *command, const char *logfile)
|
|
"SELECT filename from action WHERE command = '%s' "
|
|
"and logfile = '%s'", command, logfile)
|
|
{
|
|
- textlist_add(&tl, SQL_V[0], 0);
|
|
+ textlist_add(&tl, SQL_V(0), 0);
|
|
} SQL_END;
|
|
|
|
mark = strstr(command_clr, "%%");
|
|
@@ -107,7 +111,7 @@ void csync_run_single_command(const char *command, const char *logfile)
|
|
/* 1 */ open(logfile_clr, O_WRONLY|O_CREAT|O_APPEND, 0666);
|
|
/* 2 */ open(logfile_clr, O_WRONLY|O_CREAT|O_APPEND, 0666);
|
|
|
|
- execl("/bin/sh", "sh", "-c", real_command, 0);
|
|
+ execl("/bin/sh", "sh", "-c", real_command, NULL);
|
|
_exit(127);
|
|
}
|
|
|
|
@@ -130,7 +134,7 @@ void csync_run_commands()
|
|
SQL_BEGIN("Checking for sceduled commands",
|
|
"SELECT command, logfile FROM action GROUP BY command, logfile")
|
|
{
|
|
- textlist_add2(&tl, SQL_V[0], SQL_V[1], 0);
|
|
+ textlist_add2(&tl, SQL_V(0), SQL_V(1), 0);
|
|
} SQL_END;
|
|
|
|
for (t = tl; t != 0; t = t->next)
|
|
diff --git a/autogen.sh b/autogen.sh
|
|
index df9e797..85663ac 100755
|
|
--- a/autogen.sh
|
|
+++ b/autogen.sh
|
|
@@ -18,9 +18,9 @@
|
|
# along with this program; if not, write to the Free Software
|
|
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
|
|
-aclocal-1.7
|
|
+aclocal
|
|
autoheader
|
|
-automake-1.7 --add-missing --copy
|
|
+automake --add-missing --copy
|
|
autoconf
|
|
|
|
if [ "$1" = clean ]; then
|
|
@@ -32,5 +32,13 @@ if [ "$1" = clean ]; then
|
|
rm -rf config.guess config.sub
|
|
rm -rf cygwin/librsync-0.9.7.tar.gz
|
|
rm -rf cygwin/sqlite-2.8.16.tar.gz
|
|
+else
|
|
+ ./configure --prefix=/usr --localstatedir=/var --sysconfdir=/etc
|
|
+
|
|
+ echo ""
|
|
+ echo "Configured as"
|
|
+ echo "./configure --prefix=/usr --localstatedir=/var --sysconfdir=/etc"
|
|
+ echo ""
|
|
+ echo "reconfigure, if you want it different"
|
|
fi
|
|
|
|
diff --git a/cfgfile_parser.y b/cfgfile_parser.y
|
|
index 776bbcf..7f493ab 100644
|
|
--- a/cfgfile_parser.y
|
|
+++ b/cfgfile_parser.y
|
|
@@ -33,6 +33,8 @@ struct csync_nossl *csync_nossl = 0;
|
|
int csync_ignore_uid = 0;
|
|
int csync_ignore_gid = 0;
|
|
int csync_ignore_mod = 0;
|
|
+unsigned csync_lock_timeout = 12;
|
|
+char *csync_tempdir = NULL;
|
|
|
|
#ifdef __CYGWIN__
|
|
int csync_lowercyg_disable = 0;
|
|
@@ -51,11 +53,12 @@ void yyerror(char *text)
|
|
static void new_group(char *name)
|
|
{
|
|
int static autonum = 1;
|
|
+ int rc;
|
|
struct csync_group *t =
|
|
calloc(sizeof(struct csync_group), 1);
|
|
|
|
if (name == 0)
|
|
- asprintf(&name, "group_%d", autonum++);
|
|
+ rc = asprintf(&name, "group_%d", autonum++);
|
|
|
|
t->next = csync_group;
|
|
t->auto_method = -1;
|
|
@@ -106,12 +109,17 @@ static void add_patt(int patterntype, char *pattern)
|
|
}
|
|
#endif
|
|
|
|
+ /* strip trailing slashes from pattern */
|
|
for (i=strlen(pattern)-1; i>0; i--)
|
|
if (pattern[i] == '/')
|
|
pattern[i] = 0;
|
|
else
|
|
break;
|
|
|
|
+ /* if you use ** at least once anywhere in the pattern,
|
|
+ * _all_ stars in the pattern, even single ones,
|
|
+ * will match slashes. */
|
|
+ t->star_matches_slashes = !!strstr(pattern, "**");
|
|
t->isinclude = patterntype >= 1;
|
|
t->iscompare = patterntype >= 2;
|
|
t->pattern = pattern;
|
|
@@ -280,6 +288,7 @@ static void add_action_pattern(const char *pattern)
|
|
{
|
|
struct csync_group_action_pattern *t =
|
|
calloc(sizeof(struct csync_group_action_pattern), 1);
|
|
+ t->star_matches_slashes = !!strstr(pattern, "**");
|
|
t->pattern = pattern;
|
|
t->next = csync_group->action->pattern;
|
|
csync_group->action->pattern = t;
|
|
@@ -304,6 +313,28 @@ static void set_action_dolocal()
|
|
csync_group->action->do_local = 1;
|
|
}
|
|
|
|
+static void set_action_dolocal_only()
|
|
+{
|
|
+ csync_group->action->do_local = 1;
|
|
+ csync_group->action->do_local_only = 1;
|
|
+}
|
|
+
|
|
+static void set_lock_timeout(const char *timeout)
|
|
+{
|
|
+ csync_lock_timeout = atoi(timeout);
|
|
+}
|
|
+
|
|
+static void set_tempdir(const char *tempdir)
|
|
+{
|
|
+ csync_tempdir = strdup(tempdir);
|
|
+}
|
|
+
|
|
+static void set_database(const char *filename)
|
|
+{
|
|
+ if (!csync_database)
|
|
+ csync_database = strdup(filename);
|
|
+}
|
|
+
|
|
static void new_prefix(const char *pname)
|
|
{
|
|
struct csync_prefix *p =
|
|
@@ -392,10 +423,12 @@ static void disable_cygwin_lowercase_hack()
|
|
}
|
|
|
|
%token TK_BLOCK_BEGIN TK_BLOCK_END TK_STEND TK_AT TK_AUTO
|
|
-%token TK_NOSSL TK_IGNORE TK_GROUP TK_HOST TK_EXCL TK_INCL TK_COMP TK_KEY
|
|
+%token TK_NOSSL TK_IGNORE TK_GROUP TK_HOST TK_EXCL TK_INCL TK_COMP TK_KEY TK_DATABASE
|
|
%token TK_ACTION TK_PATTERN TK_EXEC TK_DOLOCAL TK_LOGFILE TK_NOCYGLOWER
|
|
%token TK_PREFIX TK_ON TK_COLON TK_POPEN TK_PCLOSE
|
|
-%token TK_BAK_DIR TK_BAK_GEN
|
|
+%token TK_BAK_DIR TK_BAK_GEN TK_DOLOCALONLY
|
|
+%token TK_TEMPDIR
|
|
+%token TK_LOCK_TIMEOUT
|
|
%token <txt> TK_STRING
|
|
|
|
%%
|
|
@@ -413,9 +446,15 @@ block:
|
|
{ }
|
|
| TK_NOSSL TK_STRING TK_STRING TK_STEND
|
|
{ new_nossl($2, $3); }
|
|
+| TK_DATABASE TK_STRING TK_STEND
|
|
+ { set_database($2); }
|
|
+| TK_TEMPDIR TK_STRING TK_STEND
|
|
+ { set_tempdir($2); }
|
|
| TK_IGNORE ignore_list TK_STEND
|
|
| TK_NOCYGLOWER TK_STEND
|
|
{ disable_cygwin_lowercase_hack(); }
|
|
+| TK_LOCK_TIMEOUT TK_STRING TK_STEND
|
|
+ { set_lock_timeout($2); }
|
|
;
|
|
|
|
ignore_list:
|
|
@@ -517,6 +556,8 @@ action_stmt:
|
|
{ set_action_logfile($2); }
|
|
| TK_DOLOCAL
|
|
{ set_action_dolocal(); }
|
|
+| TK_DOLOCALONLY
|
|
+ { set_action_dolocal_only(); }
|
|
;
|
|
|
|
action_pattern_list:
|
|
diff --git a/cfgfile_scanner.l b/cfgfile_scanner.l
|
|
index 77daf5f..5e93f7c 100644
|
|
--- a/cfgfile_scanner.l
|
|
+++ b/cfgfile_scanner.l
|
|
@@ -25,9 +25,13 @@
|
|
#define MAX_INCLUDE_DEPTH 10
|
|
YY_BUFFER_STATE include_stack[MAX_INCLUDE_DEPTH];
|
|
int include_stack_ptr = 0;
|
|
+
|
|
+#define YY_NO_INPUT 1
|
|
+#define YY_NO_UNPUT 1
|
|
%}
|
|
|
|
%option noyywrap yylineno
|
|
+%option nounput
|
|
%x STRING INCL
|
|
|
|
%%
|
|
@@ -42,6 +46,7 @@ int include_stack_ptr = 0;
|
|
|
|
"nossl" { return TK_NOSSL; }
|
|
"ignore" { return TK_IGNORE; }
|
|
+"database" { return TK_DATABASE; }
|
|
|
|
"group" { return TK_GROUP; }
|
|
"host" { return TK_HOST; }
|
|
@@ -56,10 +61,13 @@ int include_stack_ptr = 0;
|
|
"exec" { return TK_EXEC; }
|
|
"logfile" { return TK_LOGFILE; }
|
|
"do-local" { return TK_DOLOCAL; }
|
|
+"do-local-only" { return TK_DOLOCALONLY; }
|
|
|
|
"prefix" { return TK_PREFIX; }
|
|
"on" { return TK_ON; }
|
|
|
|
+"lock-timeout" { return TK_LOCK_TIMEOUT; }
|
|
+"tempdir" { return TK_TEMPDIR; }
|
|
"backup-directory" { return TK_BAK_DIR; }
|
|
"backup-generations" { return TK_BAK_GEN; }
|
|
|
|
diff --git a/check.c b/check.c
|
|
index 360abd3..c5b9f32 100644
|
|
--- a/check.c
|
|
+++ b/check.c
|
|
@@ -99,15 +99,20 @@ void csync_mark(const char *file, const char *thispeer, const char *peerfilter)
|
|
|
|
csync_debug(1, "Marking file as dirty: %s\n", file);
|
|
for (pl_idx=0; pl[pl_idx].peername; pl_idx++)
|
|
- if (!peerfilter || !strcmp(peerfilter, pl[pl_idx].peername))
|
|
+ if (!peerfilter || !strcmp(peerfilter, pl[pl_idx].peername)) {
|
|
+ SQL("Deleting old dirty file entries",
|
|
+ "DELETE FROM dirty WHERE filename = '%s' AND peername = '%s'",
|
|
+ url_encode(file),
|
|
+ url_encode(pl[pl_idx].peername));
|
|
+
|
|
SQL("Marking File Dirty",
|
|
- "%s INTO dirty (filename, force, myname, peername) "
|
|
+ "INSERT INTO dirty (filename, forced, myname, peername) "
|
|
"VALUES ('%s', %s, '%s', '%s')",
|
|
- csync_new_force ? "REPLACE" : "INSERT",
|
|
url_encode(file),
|
|
csync_new_force ? "1" : "0",
|
|
url_encode(pl[pl_idx].myname),
|
|
url_encode(pl[pl_idx].peername));
|
|
+ }
|
|
|
|
free(pl);
|
|
}
|
|
@@ -122,21 +127,83 @@ int csync_check_pure(const char *filename)
|
|
if (!csync_lowercyg_disable)
|
|
return 0;
|
|
#endif
|
|
-
|
|
struct stat sbuf;
|
|
- int i=0;
|
|
+ int dir_len = 0;
|
|
+ int i;
|
|
+ int same_len;
|
|
+
|
|
+ /* single entry last query cache
|
|
+ * to speed up checks from deep subdirs */
|
|
+ static struct {
|
|
+ /* store inclusive trailing slash for prefix match */
|
|
+ char *path;
|
|
+ /* strlen(path) */
|
|
+ int len;
|
|
+ /* cached return value */
|
|
+ int has_symlink;
|
|
+ } cached;
|
|
+
|
|
+ for (i = 0; filename[i]; i++)
|
|
+ if (filename[i] == '/')
|
|
+ dir_len = i+1;
|
|
+
|
|
+ if (dir_len <= 1) /* '/' a symlink? hardly. */
|
|
+ return 0;
|
|
+
|
|
+ /* identical prefix part */
|
|
+ for (i = 0; i < dir_len && i < cached.len; i++)
|
|
+ if (filename[i] != cached.path[i])
|
|
+ break;
|
|
+
|
|
+ /* backtrack to slash */
|
|
+ for (--i; i >= 0 && cached.path[i] != '/'; --i);
|
|
+ ;
|
|
|
|
- while (filename[i]) i++;
|
|
+ same_len = i+1;
|
|
+
|
|
+ csync_debug(3, " check: %s %u, %s %u, %u.\n", filename, dir_len, cached.path, cached.len, same_len);
|
|
+ /* exact match? */
|
|
+ if (dir_len == same_len && same_len == cached.len)
|
|
+ return cached.has_symlink;
|
|
|
|
{ /* new block for myfilename[] */
|
|
- char myfilename[i+1];
|
|
- memcpy(myfilename, filename, i+1);
|
|
- while (1) {
|
|
- while (myfilename[i] != '/')
|
|
- if (--i <= 0) return 0;
|
|
+ char myfilename[dir_len+1];
|
|
+ char *to_be_cached;
|
|
+ int has_symlink = 0;
|
|
+ memcpy(myfilename, filename, dir_len);
|
|
+ myfilename[dir_len] = '\0';
|
|
+ to_be_cached = strdup(myfilename);
|
|
+ i = dir_len-1;
|
|
+ while (i) {
|
|
+ for (; i && myfilename[i] != '/'; --i)
|
|
+ ;
|
|
+
|
|
+ if (i <= 1)
|
|
+ break;
|
|
+
|
|
+ if (i+1 == same_len) {
|
|
+ if (same_len == cached.len) {
|
|
+ /* exact match */
|
|
+ has_symlink = cached.has_symlink;
|
|
+ break;
|
|
+ } else if (!cached.has_symlink)
|
|
+ /* prefix of something 'pure' */
|
|
+ break;
|
|
+ }
|
|
+
|
|
myfilename[i]=0;
|
|
- if ( lstat_strict(prefixsubst(myfilename), &sbuf) || S_ISLNK(sbuf.st_mode) ) return 1;
|
|
+ if (lstat_strict(prefixsubst(myfilename), &sbuf) || S_ISLNK(sbuf.st_mode)) {
|
|
+ has_symlink = 1;
|
|
+ break;
|
|
+ }
|
|
}
|
|
+ if (to_be_cached) { /* strdup can fail. So what. */
|
|
+ free(cached.path);
|
|
+ cached.path = to_be_cached;
|
|
+ cached.len = dir_len;
|
|
+ cached.has_symlink = has_symlink;
|
|
+ }
|
|
+ return has_symlink;
|
|
}
|
|
}
|
|
|
|
@@ -148,18 +215,22 @@ void csync_check_del(const char *file, int recursive, int init_run)
|
|
|
|
if ( recursive ) {
|
|
if ( !strcmp(file, "/") )
|
|
- asprintf(&where_rec, "or 1");
|
|
+ ASPRINTF(&where_rec, "OR 1=1");
|
|
else
|
|
- asprintf(&where_rec, "or (filename > '%s/' "
|
|
- "and filename < '%s0')",
|
|
- url_encode(file), url_encode(file));
|
|
+ ASPRINTF(&where_rec, "UNION ALL SELECT filename from file where filename > '%s/' "
|
|
+ "and filename < '%s0'",
|
|
+ url_encode(file), url_encode(file));
|
|
}
|
|
|
|
SQL_BEGIN("Checking for removed files",
|
|
"SELECT filename from file where "
|
|
"filename = '%s' %s ORDER BY filename", url_encode(file), where_rec)
|
|
{
|
|
- const char *filename = url_decode(SQL_V[0]);
|
|
+ const char *filename = url_decode(SQL_V(0));
|
|
+
|
|
+ if (!csync_match_file(filename))
|
|
+ continue;
|
|
+
|
|
if ( lstat_strict(prefixsubst(filename), &st) != 0 || csync_check_pure(filename) )
|
|
textlist_add(&tl, filename, 0);
|
|
} SQL_END;
|
|
@@ -231,7 +302,7 @@ int csync_check_mod(const char *file, int recursive, int ignnoent, int init_run)
|
|
"filename = '%s'", url_encode(file))
|
|
{
|
|
if ( !csync_cmpchecktxt(checktxt,
|
|
- url_decode(SQL_V[0])) ) {
|
|
+ url_decode(SQL_V(0))) ) {
|
|
csync_debug(2, "File has changed: %s\n", file);
|
|
this_is_dirty = 1;
|
|
}
|
|
@@ -243,6 +314,10 @@ int csync_check_mod(const char *file, int recursive, int ignnoent, int init_run)
|
|
} SQL_END;
|
|
|
|
if ( this_is_dirty && !csync_compare_mode ) {
|
|
+ SQL("Deleting old file entry",
|
|
+ "DELETE FROM file WHERE filename = '%s'",
|
|
+ url_encode(file));
|
|
+
|
|
SQL("Adding or updating file entry",
|
|
"INSERT INTO file (filename, checktxt) "
|
|
"VALUES ('%s', '%s')",
|
|
diff --git a/configure.ac b/configure.ac
|
|
index 6ec6136..8989a33 100644
|
|
--- a/configure.ac
|
|
+++ b/configure.ac
|
|
@@ -17,15 +17,15 @@
|
|
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
|
|
# Process this file with autoconf to produce a configure script.
|
|
-AC_INIT(csync2, 1.34, clifford@clifford.at)
|
|
+AC_INIT(csync2, 2.0rc1, csync2@lists.linbit.com)
|
|
AM_INIT_AUTOMAKE
|
|
|
|
AC_CONFIG_SRCDIR(csync2.c)
|
|
AM_CONFIG_HEADER(config.h)
|
|
|
|
# Use /etc and /var instead of $prefix/...
|
|
-test "$localstatedir" = '${prefix}/var' && localstatedir=/var
|
|
-test "$sysconfdir" = '${prefix}/etc' && sysconfdir=/etc
|
|
+# test "$localstatedir" = '${prefix}/var' && localstatedir=/var
|
|
+# test "$sysconfdir" = '${prefix}/etc' && sysconfdir=/etc
|
|
|
|
# Checks for programs.
|
|
AC_PROG_CC
|
|
@@ -33,6 +33,9 @@ AC_PROG_INSTALL
|
|
AC_PROG_YACC
|
|
AM_PROG_LEX
|
|
|
|
+# check for large file support
|
|
+AC_SYS_LARGEFILE
|
|
+
|
|
# Check for librsync.
|
|
AC_ARG_WITH([librsync-source],
|
|
AS_HELP_STRING([--with-librsync-source=source-tar-file],
|
|
@@ -42,35 +45,78 @@ AC_ARG_WITH([librsync-source],
|
|
)
|
|
AM_CONDITIONAL([PRIVATE_LIBRSYNC], [test -n "$librsync_source_file"])
|
|
|
|
-# Check for libsqlite.
|
|
-AC_ARG_WITH([libsqlite-source],
|
|
- AS_HELP_STRING([--with-libsqlite-source=source-tar-file],
|
|
- [build this libsqlite and link statically against it (hack! hack!)]),
|
|
- AC_SUBST([libsqlite_source_file], $withval),
|
|
- AC_CHECK_LIB([sqlite], [sqlite_exec], , [AC_MSG_ERROR(libsqlite is required)])
|
|
-)
|
|
-AM_CONDITIONAL([PRIVATE_LIBSQLITE], [test -n "$libsqlite_source_file"])
|
|
+AC_ARG_ENABLE([sqlite],
|
|
+ [AC_HELP_STRING([--enable-sqlite],
|
|
+ [enable/disable sqlite 2 support (default is disabled)])],
|
|
+ [], [ enable_sqlite=no ])
|
|
+
|
|
+if test "$enable_sqlite" == yes
|
|
+then
|
|
+ AC_CHECK_HEADERS([sqlite.h], , [AC_MSG_ERROR([[SQLite header not found; install libsqlite-dev and dependencies for SQLite 2 support]])])
|
|
+
|
|
+ AC_DEFINE([HAVE_SQLITE], 1, [Define if sqlite 2 support is wanted])
|
|
+fi
|
|
+
|
|
+AC_ARG_ENABLE([sqlite3],
|
|
+ [AC_HELP_STRING([--disable-sqlite3],
|
|
+ [enable/disable sqlite3 support (default is enabled)])],
|
|
+ [], [ enable_sqlite3=yes ])
|
|
+
|
|
+if test "$enable_sqlite3" == yes
|
|
+then
|
|
+ AC_CHECK_HEADERS([sqlite3.h], , [AC_MSG_ERROR([[SQLite header not found; install libsqlite3-dev and dependencies for SQLite 3 support]])])
|
|
+
|
|
+ AC_DEFINE([HAVE_SQLITE3], 1, [Define if sqlite3 support is wanted])
|
|
+fi
|
|
|
|
AC_ARG_ENABLE([gnutls],
|
|
- [AC_HELP_STRING([--disable-gnutls],
|
|
- [enable/disable GNU TLS support (default is enabled)])],
|
|
+ [AS_HELP_STRING([--disable-gnutls],[enable/disable GNU TLS support (default is enabled)])],
|
|
[], [ enable_gnutls=yes ])
|
|
|
|
if test "$enable_gnutls" != no
|
|
then
|
|
+ PKG_PROG_PKG_CONFIG
|
|
+ PKG_CHECK_MODULES([LIBGNUTLS], [gnutls >= 2.6.0], [
|
|
+ AC_DEFINE([HAVE_LIBGNUTLS], 1, [Define to 1 when using GNU TLS library])
|
|
+ ])
|
|
+fi
|
|
|
|
- # Check for gnuTLS.
|
|
- AM_PATH_LIBGNUTLS(1.0.0, , [ AC_MSG_ERROR([[gnutls not found; install gnutls, gnutls-openssl and libtasn1 packages for your system or run configure with --disable-gnutls]]) ])
|
|
+AC_ARG_ENABLE([mysql],
|
|
+ [AC_HELP_STRING([--enable-mysql],
|
|
+ [enable/disable MySQL support (default is disabled)])],
|
|
+ [], [ enable_mysql=no ])
|
|
|
|
+AC_ARG_ENABLE([postgres],
|
|
+ [AC_HELP_STRING([--enable-postgres],
|
|
+ [enable/disable Postgres support (default is disabled)])],
|
|
+ [], [ enable_postgres=no ])
|
|
+
|
|
+if test "$enable_mysql" == yes
|
|
+then
|
|
+ # Check for mysql.
|
|
# This is a bloody hack for fedora core
|
|
- CFLAGS="$CFLAGS $LIBGNUTLS_CFLAGS"
|
|
- LIBS="$LIBS $LIBGNUTLS_LIBS -ltasn1"
|
|
+ CFLAGS="$CFLAGS `mysql_config --cflags`"
|
|
+
|
|
+ # Check MySQL development header
|
|
+ AC_CHECK_HEADERS([mysql/mysql.h], , [AC_MSG_ERROR([[mysql header not found; install mysql-devel and dependencies for MySQL Support]])])
|
|
|
|
- # Check gnuTLS SSL compatibility lib.
|
|
- AC_CHECK_LIB([gnutls-openssl], [SSL_new], , [AC_MSG_ERROR([[gnutls-openssl not found; install gnutls, gnutls-openssl and libtasn1 packages for your system or run configure with --disable-gnutls]])])
|
|
+ AC_DEFINE([HAVE_MYSQL], 1, [Define if mysql support is wanted])
|
|
+fi
|
|
+
|
|
+if test "$enable_postgres" == yes
|
|
+then
|
|
+ AC_CHECK_HEADERS([postgresql/libpq-fe.h], , [AC_MSG_ERROR([[postgres header not found; install libpq-dev and dependencies for Postgres support]])])
|
|
|
|
+ AC_DEFINE([HAVE_POSTGRES], 1, [Define if postgres support is wanted])
|
|
+fi
|
|
+
|
|
+# at least one db backend must be configured.
|
|
+
|
|
+if test "$enable_postgres" != yes && test "$enable_mysql" != yes &&
|
|
+ test "$enable_sqlite3" != yes && test "$enable_sqlite" != yes
|
|
+then
|
|
+ AC_MSG_ERROR([No database backend configured. Please enable either sqlite, sqlite3, mysql or postgres.])
|
|
fi
|
|
|
|
AC_CONFIG_FILES([Makefile])
|
|
AC_OUTPUT
|
|
-
|
|
diff --git a/conn.c b/conn.c
|
|
index 6f8dfdc..8dda10d 100644
|
|
--- a/conn.c
|
|
+++ b/conn.c
|
|
@@ -30,52 +30,77 @@
|
|
#include <netdb.h>
|
|
#include <errno.h>
|
|
|
|
-#ifdef HAVE_LIBGNUTLS_OPENSSL
|
|
+#ifdef HAVE_LIBGNUTLS
|
|
# include <gnutls/gnutls.h>
|
|
-# include <gnutls/openssl.h>
|
|
+# include <gnutls/x509.h>
|
|
#endif
|
|
|
|
int conn_fd_in = -1;
|
|
int conn_fd_out = -1;
|
|
int conn_clisok = 0;
|
|
|
|
-#ifdef HAVE_LIBGNUTLS_OPENSSL
|
|
+#ifdef HAVE_LIBGNUTLS
|
|
int csync_conn_usessl = 0;
|
|
|
|
-SSL_METHOD *conn_ssl_meth;
|
|
-SSL_CTX *conn_ssl_ctx;
|
|
-SSL *conn_ssl;
|
|
+static gnutls_session_t conn_tls_session;
|
|
+static gnutls_certificate_credentials_t conn_x509_cred;
|
|
#endif
|
|
|
|
+
|
|
+/* getaddrinfo stuff mostly copied from its manpage */
|
|
+int conn_connect(const char *peername)
|
|
+{
|
|
+ struct addrinfo hints;
|
|
+ struct addrinfo *result, *rp;
|
|
+ int sfd, s;
|
|
+
|
|
+ /* Obtain address(es) matching host/port */
|
|
+ memset(&hints, 0, sizeof(struct addrinfo));
|
|
+ hints.ai_family = AF_UNSPEC; /* Allow IPv4 or IPv6 */
|
|
+ hints.ai_socktype = SOCK_STREAM;
|
|
+ hints.ai_flags = 0;
|
|
+ hints.ai_protocol = 0; /* Any protocol */
|
|
+
|
|
+ s = getaddrinfo(peername, csync_port, &hints, &result);
|
|
+ if (s != 0) {
|
|
+ csync_debug(1, "Cannot resolve peername, getaddrinfo: %s\n", gai_strerror(s));
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ /* getaddrinfo() returns a list of address structures.
|
|
+ Try each address until we successfully connect(2).
|
|
+ If socket(2) (or connect(2)) fails, we (close the socket
|
|
+ and) try the next address. */
|
|
+
|
|
+ for (rp = result; rp != NULL; rp = rp->ai_next) {
|
|
+ sfd = socket(rp->ai_family, rp->ai_socktype, rp->ai_protocol);
|
|
+ if (sfd == -1)
|
|
+ continue;
|
|
+
|
|
+ if (connect(sfd, rp->ai_addr, rp->ai_addrlen) != -1)
|
|
+ break; /* Success */
|
|
+
|
|
+ close(sfd);
|
|
+ }
|
|
+ freeaddrinfo(result); /* No longer needed */
|
|
+
|
|
+ if (rp == NULL) /* No address succeeded */
|
|
+ return -1;
|
|
+
|
|
+ return sfd;
|
|
+}
|
|
+
|
|
int conn_open(const char *peername)
|
|
{
|
|
- struct sockaddr_in sin;
|
|
- struct hostent *hp;
|
|
int on = 1;
|
|
|
|
- hp = gethostbyname(peername);
|
|
- if ( ! hp ) {
|
|
- csync_debug(1, "Can't resolve peername.\n");
|
|
- return -1;
|
|
- }
|
|
-
|
|
- conn_fd_in = socket(hp->h_addrtype, SOCK_STREAM, 0);
|
|
+ conn_fd_in = conn_connect(peername);
|
|
if (conn_fd_in < 0) {
|
|
csync_debug(1, "Can't create socket.\n");
|
|
return -1;
|
|
}
|
|
|
|
- sin.sin_family = hp->h_addrtype;
|
|
- bcopy(hp->h_addr, &sin.sin_addr, hp->h_length);
|
|
- sin.sin_port = htons(csync_port);
|
|
-
|
|
- if (connect(conn_fd_in, (struct sockaddr *)&sin, sizeof (sin)) < 0) {
|
|
- csync_debug(1, "Can't connect to remote host.\n");
|
|
- close(conn_fd_in); conn_fd_in = -1;
|
|
- return -1;
|
|
- }
|
|
-
|
|
- if (setsockopt(conn_fd_in, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on) ) < 0 ) {
|
|
+ if (setsockopt(conn_fd_in, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on) ) < 0) {
|
|
csync_debug(1, "Can't set TCP_NODELAY option on TCP socket.\n");
|
|
close(conn_fd_in); conn_fd_in = -1;
|
|
return -1;
|
|
@@ -83,10 +108,9 @@ int conn_open(const char *peername)
|
|
|
|
conn_fd_out = conn_fd_in;
|
|
conn_clisok = 1;
|
|
-#ifdef HAVE_LIBGNUTLS_OPENSSL
|
|
+#ifdef HAVE_LIBGNUTLS
|
|
csync_conn_usessl = 0;
|
|
#endif
|
|
-
|
|
return 0;
|
|
}
|
|
|
|
@@ -97,12 +121,13 @@ int conn_set(int infd, int outfd)
|
|
conn_fd_in = infd;
|
|
conn_fd_out = outfd;
|
|
conn_clisok = 1;
|
|
-#ifdef HAVE_LIBGNUTLS_OPENSSL
|
|
+#ifdef HAVE_LIBGNUTLS
|
|
csync_conn_usessl = 0;
|
|
#endif
|
|
|
|
// when running in server mode, this has been done already
|
|
// in csync2.c with more restrictive error handling..
|
|
+ // FIXME don't even try in "ssh" mode
|
|
if ( setsockopt(conn_fd_out, IPPROTO_TCP, TCP_NODELAY, &on, (socklen_t) sizeof(on)) < 0 )
|
|
csync_debug(1, "Can't set TCP_NODELAY option on TCP socket.\n");
|
|
|
|
@@ -110,43 +135,106 @@ int conn_set(int infd, int outfd)
|
|
}
|
|
|
|
|
|
-#ifdef HAVE_LIBGNUTLS_OPENSSL
|
|
+#ifdef HAVE_LIBGNUTLS
|
|
+
|
|
+static void ssl_log(int level, const char* msg)
|
|
+{ csync_debug(level, "%s", msg); }
|
|
|
|
-char *ssl_keyfile = ETCDIR "/csync2_ssl_key.pem";
|
|
-char *ssl_certfile = ETCDIR "/csync2_ssl_cert.pem";
|
|
+static const char *ssl_keyfile = ETCDIR "/csync2_ssl_key.pem";
|
|
+static const char *ssl_certfile = ETCDIR "/csync2_ssl_cert.pem";
|
|
|
|
int conn_activate_ssl(int server_role)
|
|
{
|
|
- static int sslinit = 0;
|
|
+ gnutls_alert_description_t alrt;
|
|
+ int err;
|
|
|
|
if (csync_conn_usessl)
|
|
return 0;
|
|
|
|
- if (!sslinit) {
|
|
- SSL_load_error_strings();
|
|
- SSL_library_init();
|
|
- sslinit=1;
|
|
- }
|
|
+ gnutls_global_init();
|
|
+ gnutls_global_set_log_function(ssl_log);
|
|
+ gnutls_global_set_log_level(10);
|
|
|
|
- conn_ssl_meth = (server_role ? SSLv23_server_method : SSLv23_client_method)();
|
|
- conn_ssl_ctx = SSL_CTX_new(conn_ssl_meth);
|
|
+ gnutls_certificate_allocate_credentials(&conn_x509_cred);
|
|
|
|
- if (SSL_CTX_use_PrivateKey_file(conn_ssl_ctx, ssl_keyfile, SSL_FILETYPE_PEM) <= 0)
|
|
- csync_fatal("SSL: failed to use key file %s.\n", ssl_keyfile);
|
|
+ err = gnutls_certificate_set_x509_key_file(conn_x509_cred, ssl_certfile, ssl_keyfile, GNUTLS_X509_FMT_PEM);
|
|
+ if(err != GNUTLS_E_SUCCESS) {
|
|
+ gnutls_certificate_free_credentials(conn_x509_cred);
|
|
+ gnutls_global_deinit();
|
|
|
|
- if (SSL_CTX_use_certificate_file(conn_ssl_ctx, ssl_certfile, SSL_FILETYPE_PEM) <= 0)
|
|
- csync_fatal("SSL: failed to use certificate file %s.\n", ssl_certfile);
|
|
+ csync_fatal(
|
|
+ "SSL: failed to use key file %s and/or certificate file %s: %s (%s)\n",
|
|
+ ssl_keyfile,
|
|
+ ssl_certfile,
|
|
+ gnutls_strerror(err),
|
|
+ gnutls_strerror_name(err)
|
|
+ );
|
|
+ }
|
|
|
|
- if (! (conn_ssl = SSL_new(conn_ssl_ctx)) )
|
|
- csync_fatal("Creating a new SSL handle failed.\n");
|
|
+ if(server_role) {
|
|
+ gnutls_certificate_free_cas(conn_x509_cred);
|
|
|
|
- gnutls_certificate_server_set_request(conn_ssl->gnutls_state, GNUTLS_CERT_REQUIRE);
|
|
+ if(gnutls_certificate_set_x509_trust_file(conn_x509_cred, ssl_certfile, GNUTLS_X509_FMT_PEM) < 1) {
|
|
+ gnutls_certificate_free_credentials(conn_x509_cred);
|
|
+ gnutls_global_deinit();
|
|
|
|
- SSL_set_rfd(conn_ssl, conn_fd_in);
|
|
- SSL_set_wfd(conn_ssl, conn_fd_out);
|
|
+ csync_fatal(
|
|
+ "SSL: failed to use certificate file %s as CA.\n",
|
|
+ ssl_certfile
|
|
+ );
|
|
+ }
|
|
+ } else
|
|
+ gnutls_certificate_free_ca_names(conn_x509_cred);
|
|
+
|
|
+ gnutls_init(&conn_tls_session, (server_role ? GNUTLS_SERVER : GNUTLS_CLIENT));
|
|
+ gnutls_priority_set_direct(conn_tls_session, "PERFORMANCE", NULL);
|
|
+ gnutls_credentials_set(conn_tls_session, GNUTLS_CRD_CERTIFICATE, conn_x509_cred);
|
|
|
|
- if ( (server_role ? SSL_accept : SSL_connect)(conn_ssl) < 1 )
|
|
- csync_fatal("Establishing SSL connection failed.\n");
|
|
+ if(server_role) {
|
|
+ gnutls_certificate_send_x509_rdn_sequence(conn_tls_session, 0);
|
|
+ gnutls_certificate_server_set_request(conn_tls_session, GNUTLS_CERT_REQUIRE);
|
|
+ }
|
|
+
|
|
+ gnutls_transport_set_ptr2(
|
|
+ conn_tls_session,
|
|
+ (gnutls_transport_ptr_t)conn_fd_in,
|
|
+ (gnutls_transport_ptr_t)conn_fd_out
|
|
+ );
|
|
+
|
|
+ err = gnutls_handshake(conn_tls_session);
|
|
+ switch(err) {
|
|
+ case GNUTLS_E_SUCCESS:
|
|
+ break;
|
|
+
|
|
+ case GNUTLS_E_WARNING_ALERT_RECEIVED:
|
|
+ alrt = gnutls_alert_get(conn_tls_session);
|
|
+ fprintf(
|
|
+ csync_debug_out,
|
|
+ "SSL: warning alert received from peer: %d (%s).\n",
|
|
+ alrt, gnutls_alert_get_name(alrt)
|
|
+ );
|
|
+ break;
|
|
+
|
|
+ case GNUTLS_E_FATAL_ALERT_RECEIVED:
|
|
+ alrt = gnutls_alert_get(conn_tls_session);
|
|
+ fprintf(
|
|
+ csync_debug_out,
|
|
+ "SSL: fatal alert received from peer: %d (%s).\n",
|
|
+ alrt, gnutls_alert_get_name(alrt)
|
|
+ );
|
|
+
|
|
+ default:
|
|
+ gnutls_bye(conn_tls_session, GNUTLS_SHUT_RDWR);
|
|
+ gnutls_deinit(conn_tls_session);
|
|
+ gnutls_certificate_free_credentials(conn_x509_cred);
|
|
+ gnutls_global_deinit();
|
|
+
|
|
+ csync_fatal(
|
|
+ "SSL: handshake failed: %s (%s)\n",
|
|
+ gnutls_strerror(err),
|
|
+ gnutls_strerror_name(err)
|
|
+ );
|
|
+ }
|
|
|
|
csync_conn_usessl = 1;
|
|
|
|
@@ -155,15 +243,15 @@ int conn_activate_ssl(int server_role)
|
|
|
|
int conn_check_peer_cert(const char *peername, int callfatal)
|
|
{
|
|
- const X509 *peercert;
|
|
+ const gnutls_datum_t *peercerts;
|
|
+ unsigned npeercerts;
|
|
int i, cert_is_ok = -1;
|
|
|
|
if (!csync_conn_usessl)
|
|
return 1;
|
|
|
|
- peercert = SSL_get_peer_certificate(conn_ssl);
|
|
-
|
|
- if (!peercert || peercert->size <= 0) {
|
|
+ peercerts = gnutls_certificate_get_peers(conn_tls_session, &npeercerts);
|
|
+ if(peercerts == NULL || npeercerts == 0) {
|
|
if (callfatal)
|
|
csync_fatal("Peer did not provide an SSL X509 cetrificate.\n");
|
|
csync_debug(1, "Peer did not provide an SSL X509 cetrificate.\n");
|
|
@@ -171,17 +259,17 @@ int conn_check_peer_cert(const char *peername, int callfatal)
|
|
}
|
|
|
|
{
|
|
- char certdata[peercert->size*2 + 1];
|
|
+ char certdata[2*peercerts[0].size + 1];
|
|
|
|
- for (i=0; i<peercert->size; i++)
|
|
- sprintf(certdata+i*2, "%02X", peercert->data[i]);
|
|
- certdata[peercert->size*2] = 0;
|
|
+ for (i=0; i<peercerts[0].size; i++)
|
|
+ sprintf(&certdata[2*i], "%02X", peercerts[0].data[i]);
|
|
+ certdata[2*i] = 0;
|
|
|
|
SQL_BEGIN("Checking peer x509 certificate.",
|
|
"SELECT certdata FROM x509_cert WHERE peername = '%s'",
|
|
url_encode(peername))
|
|
{
|
|
- if (!strcmp(SQL_V[0], certdata))
|
|
+ if (!strcmp(SQL_V(0), certdata))
|
|
cert_is_ok = 1;
|
|
else
|
|
cert_is_ok = 0;
|
|
@@ -215,14 +303,19 @@ int conn_check_peer_cert(const char *peername, int callfatal)
|
|
return 1;
|
|
}
|
|
|
|
-#endif /* HAVE_LIBGNUTLS_OPENSSL */
|
|
+#endif /* HAVE_LIBGNUTLS */
|
|
|
|
int conn_close()
|
|
{
|
|
if ( !conn_clisok ) return -1;
|
|
|
|
-#ifdef HAVE_LIBGNUTLS_OPENSSL
|
|
- if ( csync_conn_usessl ) SSL_free(conn_ssl);
|
|
+#ifdef HAVE_LIBGNUTLS
|
|
+ if ( csync_conn_usessl ) {
|
|
+ gnutls_bye(conn_tls_session, GNUTLS_SHUT_RDWR);
|
|
+ gnutls_deinit(conn_tls_session);
|
|
+ gnutls_certificate_free_credentials(conn_x509_cred);
|
|
+ gnutls_global_deinit();
|
|
+ }
|
|
#endif
|
|
|
|
if ( conn_fd_in != conn_fd_out) close(conn_fd_in);
|
|
@@ -237,9 +330,9 @@ int conn_close()
|
|
|
|
static inline int READ(void *buf, size_t count)
|
|
{
|
|
-#ifdef HAVE_LIBGNUTLS_OPENSSL
|
|
+#ifdef HAVE_LIBGNUTLS
|
|
if (csync_conn_usessl)
|
|
- return SSL_read(conn_ssl, buf, count);
|
|
+ return gnutls_record_recv(conn_tls_session, buf, count);
|
|
else
|
|
#endif
|
|
return read(conn_fd_in, buf, count);
|
|
@@ -249,9 +342,9 @@ static inline int WRITE(const void *buf, size_t count)
|
|
{
|
|
static int n, total;
|
|
|
|
-#ifdef HAVE_LIBGNUTLS_OPENSSL
|
|
+#ifdef HAVE_LIBGNUTLS
|
|
if (csync_conn_usessl)
|
|
- return SSL_write(conn_ssl, buf, count);
|
|
+ return gnutls_record_send(conn_tls_session, buf, count);
|
|
else
|
|
#endif
|
|
{
|
|
@@ -302,7 +395,7 @@ int conn_raw_read(void *buf, size_t count)
|
|
return 0;
|
|
}
|
|
|
|
-void conn_debug(const char *name, const unsigned char*buf, size_t count)
|
|
+void conn_debug(const char *name, const char*buf, size_t count)
|
|
{
|
|
int i;
|
|
|
|
@@ -365,9 +458,9 @@ void conn_printf(const char *fmt, ...)
|
|
conn_write(buffer, size);
|
|
}
|
|
|
|
-int conn_gets(char *s, int size)
|
|
+size_t conn_gets(char *s, size_t size)
|
|
{
|
|
- int i=0;
|
|
+ size_t i=0;
|
|
|
|
while (i<size-1) {
|
|
int rc = conn_raw_read(s+i, 1);
|
|
diff --git a/contrib/csync2id.pl b/contrib/csync2id.pl
|
|
new file mode 100644
|
|
index 0000000..1b3a5fa
|
|
--- /dev/null
|
|
+++ b/contrib/csync2id.pl
|
|
@@ -0,0 +1,359 @@
|
|
+#!/usr/bin/perl -w
|
|
+# Copyright: telegraaf (NL)
|
|
+# Author: ard@telegraafnet.nl
|
|
+# License: GPL v2 or higher
|
|
+use strict;
|
|
+use Linux::Inotify2;
|
|
+use Data::Dumper;
|
|
+use File::Find;
|
|
+use POSIX qw(uname :sys_wait_h);
|
|
+use Sys::Syslog;
|
|
+use Net::Server::Daemonize qw(daemonize);
|
|
+use IO::Select;
|
|
+use Fcntl;
|
|
+
|
|
+
|
|
+my $program="csync2id";
|
|
+my $daemonize=1;
|
|
+my $usesyslog=1;
|
|
+my $pidfile='/var/run/csync2id.pid';
|
|
+my $pidfileboot='/var/run/csync2id.boot.pid';
|
|
+
|
|
+################################################################################
|
|
+# Config items
|
|
+# Overridden by /etc/csync2id.cfg
|
|
+# Normal config in /etc/csync2id.cfg:
|
|
+#
|
|
+# @::dirs=qw( /data1 /data2 );
|
|
+# 1;
|
|
+#
|
|
+# csyncdirhint: preferred hint command for directories (a single directory name
|
|
+# will be added)
|
|
+# csyncfilehint: preferred hint command for files (at most $x filenames will be appended)
|
|
+# csynccheck: preferred command scheduled right after the hint, or after a timeout
|
|
+# csyncupdate: preferred command scheduled right after the check
|
|
+# debug: log debug lines
|
|
+# statsdir: file to log the number of watched directories
|
|
+# statchanges: file to log the number of file change events
|
|
+# statsretry: file to log the number of retries needed so far for the hint
|
|
+# dirs: an array of directories which need to be watched recursively
|
|
+################################################################################
|
|
+
|
|
+$::csynchintmaxargs=20;
|
|
+@::csyncdirhint=("/usr/sbin/csync2", "-B","-A","-rh");
|
|
+@::csyncfilehint=("/usr/sbin/csync2", "-B","-A","-h");
|
|
+@::csynccheck=("/usr/sbin/csync2", "-B","-A","-c");
|
|
+@::csyncupdate=("/usr/sbin/csync2", "-B","-A","-u");
|
|
+$::debug=3;
|
|
+$::statsdir="/dev/shm/csyncstats/dirs";
|
|
+$::statschanges="/dev/shm/csyncstats/changes";
|
|
+$::statsretry="/dev/shm/csyncstats/retry";
|
|
+@::dirs=();
|
|
+require "/etc/csync2id.cfg";
|
|
+
|
|
+$daemonize && daemonize(0,0,$pidfileboot);
|
|
+$usesyslog && openlog("$program",'pid','daemon');
|
|
+
|
|
+use constant { LOGERR => 0, LOGWARN => 1, LOGINFO =>2, LOGDEBUG=>3,LOGSLOTS=>256 };
|
|
+my %prios=( 0 => 'err', 1 => 'warning', 2 => 'info', default => 'debug' );
|
|
+sub logger {
|
|
+ my($level,@args)=@_;
|
|
+ my ($prio)=$prios{$level}||$prios{'default'}; # :$prios{'default'};
|
|
+ if($usesyslog) {
|
|
+ syslog($prio,@args) if (($level<= LOGDEBUG && $level<=$::debug)||($::debug>=LOGDEBUG && $level&$::debug))
|
|
+ } else {
|
|
+ print "LOG: $prio ";
|
|
+ print(@args);
|
|
+ print "\n";
|
|
+ }
|
|
+}
|
|
+
|
|
+logger(LOGDEBUG,Dumper(\@::dirs));
|
|
+
|
|
+
|
|
+my $inotify = new Linux::Inotify2 or ( logger(LOGERR, "Unable to create new inotify object: $!") && die("inotify") );
|
|
+
|
|
+# For stats
|
|
+my $globaldirs=0;
|
|
+my $globalevents=0;
|
|
+my $globalhintretry=0;
|
|
+
|
|
+sub logstatsline {
|
|
+ my ($file,$line)=@_;
|
|
+# open STATS,"> $file";
|
|
+# print STATS $line;
|
|
+# close STATS;
|
|
+}
|
|
+
|
|
+
|
|
+#package Runner;
|
|
+################################################################################
|
|
+# Process runner
|
|
+# Runs processes and keep status
|
|
+# API:
|
|
+# runstatus: current status of a runslot (running/idle)
|
|
+# exitstatus: last status of an exec
|
|
+# slotrun: forkexec a new command with a callback when it's finished for a specific slot
|
|
+# Helpers:
|
|
+# reaper is the SIGCHLD handler
|
|
+# checkchildren should be called after syscalls which exited with E_INTR, and
|
|
+# calls the specific callbacks.
|
|
+################################################################################
|
|
+use constant { RUN_IDLE => 0, RUN_RUNNING => 1, RUN_REAPED =>2 };
|
|
+my %slotstatus;
|
|
+my %slotexitstatus;
|
|
+my %slotcommandline;
|
|
+my %slotcallback;
|
|
+my %slotpid2slot;
|
|
+my %slotstarttime;
|
|
+
|
|
+# pid queue for reaper
|
|
+# Every pid (key) contains a waitforpid exit status as value.
|
|
+my %slotpidreaped;
|
|
+
|
|
+sub runstatus {
|
|
+ my ($slot)=@_;
|
|
+ return($slotstatus{$slot}) if exists($slotstatus{$slot});
|
|
+ return RUN_IDLE;
|
|
+}
|
|
+sub slotrun {
|
|
+ my ($slot,$callback,$commandline)=(@_);
|
|
+ $SIG{CHLD} = \&reaper;
|
|
+ if(runstatus($slot)!=RUN_IDLE) {
|
|
+ logger(LOGDEBUG,"SlotRun: Asked to run for $slot, but $slot != RUN_IDLE");
|
|
+ return -1;
|
|
+ }
|
|
+ $slotcommandline{$slot}=$commandline;
|
|
+ $slotcallback{$slot}=$callback;
|
|
+ $slotstatus{$slot}=RUN_RUNNING;
|
|
+ $slotstarttime{$slot}=time();
|
|
+ my $pid=fork();
|
|
+ if(!$pid) {
|
|
+ # We know that exec should not return. Now tell the perl interpreter that we know.
|
|
+ {
|
|
+ exec(@$commandline);
|
|
+ }
|
|
+ logger(LOGWARN,"SlotRun: $slot Exec failed: ".join(' ','>', @$commandline,'<'));
|
|
+ # If we can't exec, we don't really know why, and we don't want to go busy fork execing
|
|
+ # Give a fork exec grace by waiting
|
|
+ sleep 1;
|
|
+ exit 1;
|
|
+ }
|
|
+ logger(LOGDEBUG,"SlotRun: $slot # ".$pid.": run".join(' ','>', @$commandline,'<'));
|
|
+ $slotpid2slot{$pid}=$slot;
|
|
+}
|
|
+sub exitstatus {
|
|
+ my ($slot)=@_;
|
|
+ return($slotexitstatus{$slot}) if exists($slotexitstatus{$slot});
|
|
+ return -1;
|
|
+}
|
|
+sub reaper {
|
|
+}
|
|
+
|
|
+sub checkchildren {
|
|
+ if($::debug==LOGSLOTS) {
|
|
+ while(my ($slot,$status) = each %slotstatus) {
|
|
+ logger(LOGDEBUG,"SlotRun: $slot status $status time: ".($status?(time()-$slotstarttime{$slot}):'x'));
|
|
+ };
|
|
+ }
|
|
+ while() {
|
|
+ my ($pid)=waitpid(-1,&WNOHANG);
|
|
+ if($pid<=0) {
|
|
+ last;
|
|
+ }
|
|
+ my $status=$?;
|
|
+ if (WIFEXITED($status)||WIFSIGNALED($status) && exists($slotpid2slot{$pid})) {
|
|
+ my $slot=$slotpid2slot{$pid};
|
|
+ delete($slotpid2slot{$pid});
|
|
+ $slotstatus{$slot}=RUN_IDLE;
|
|
+ $slotexitstatus{$slot}=$status;
|
|
+ logger(LOGDEBUG, "SlotRun: $slot $pid exited with $status == ".WEXITSTATUS($status).".\n");
|
|
+ # Callback determines if we run again or not.
|
|
+ $slotcallback{$slot}->($slot,$slotexitstatus{$slot},$slotcommandline{$slot});
|
|
+ } else {
|
|
+ logger(LOGDEBUG, "SlotRun: Unknown process $pid change state.\n");
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+################################################################################
|
|
+# CSYNC RUNNERS
|
|
+# groups queued hints into single csync commands
|
|
+# run csync update and check commands
|
|
+################################################################################
|
|
+
|
|
+# use constant { CSYNCHINT => 0 , CSYNCCHECK=>1 , CSYNCUPDATE=>2 };
|
|
+my @hintfifo;
|
|
+
|
|
+sub updateCallback {
|
|
+ my ($slot,$exitstatus,$command)=@_;
|
|
+ if($exitstatus) {
|
|
+ logger(LOGWARN,"Updater got ".$exitstatus.", NOT retrying run:".join(' ','>',@$command,'<'));
|
|
+ }
|
|
+}
|
|
+sub runupdater {
|
|
+ if(runstatus('csupdate') == RUN_IDLE) {
|
|
+ slotrun('csupdate',\&updateCallback,\@::csyncupdate);
|
|
+ }
|
|
+}
|
|
+
|
|
+sub checkerCallback {
|
|
+ my ($slot,$exitstatus,$command)=@_;
|
|
+ if($exitstatus) {
|
|
+ logger(LOGWARN,"Checker got ".$exitstatus.", NOT retrying run:".join(' ','>',@$command,'<'));
|
|
+ }
|
|
+ runupdater();
|
|
+}
|
|
+sub runchecker {
|
|
+ if(runstatus('cscheck') == RUN_IDLE) {
|
|
+ slotrun('cscheck',\&checkerCallback,\@::csynccheck);
|
|
+ }
|
|
+}
|
|
+sub hinterCallback {
|
|
+ my ($slot,$exitstatus,$command)=@_;
|
|
+ if($exitstatus) {
|
|
+ logger(LOGWARN,"Hinter got ".$exitstatus.", retrying run:".join(' ','>',@$command,'<'));
|
|
+ $globalhintretry++;
|
|
+ logstatsline($::statsretry,$globalhintretry);
|
|
+ slotrun($slot,\&hinterCallback,$command);
|
|
+ } else {
|
|
+ runchecker();
|
|
+ }
|
|
+}
|
|
+sub givehints {
|
|
+ if(runstatus('cshint') == RUN_IDLE && @hintfifo) {
|
|
+ # PREPARE JOB
|
|
+ # Directories should be treated with care, one at a time.
|
|
+ my @hintcommand;
|
|
+ if($hintfifo[0]->{'recurse'}) {
|
|
+ my $filename=$hintfifo[0]->{'filename'};
|
|
+ @hintcommand=(@::csyncdirhint,$filename);
|
|
+ shift(@hintfifo) while (@hintfifo && $filename eq $hintfifo[0]->{'filename'} );
|
|
+ } else {
|
|
+ # Files can be bulked, until the next directory
|
|
+ my $nrargs=0;
|
|
+ @hintcommand=(@::csyncfilehint);
|
|
+ while($nrargs < $::csynchintmaxargs && @hintfifo && !$hintfifo[0]->{'recurse'}) {
|
|
+ my $filename=$hintfifo[0]->{'filename'};
|
|
+ push(@hintcommand,$filename);
|
|
+ shift(@hintfifo) while (@hintfifo && $filename eq $hintfifo[0]->{'filename'} );
|
|
+ $nrargs++;
|
|
+ }
|
|
+ }
|
|
+ slotrun('cshint',\&hinterCallback,\@hintcommand);
|
|
+ }
|
|
+}
|
|
+
|
|
+################################################################################
|
|
+# Subtree parser
|
|
+# Adds subtrees to an existing watch
|
|
+# globals: $globaldirs for stats.
|
|
+# Logs to logger
|
|
+################################################################################
|
|
+sub watchtree {
|
|
+ my ($inotifier,$tree,$inotifyflags) = @_;
|
|
+ $inotifier->watch ($tree, $inotifyflags);
|
|
+ $globaldirs++;
|
|
+ find(
|
|
+ sub {
|
|
+ if(! m/^\.\.?$/) {
|
|
+ my ($dev, $ino, $mode, $nlink, $uid, $gid) = lstat($_) ;
|
|
+ if(-d _ ) {
|
|
+ if ($nlink==2) {
|
|
+ $File::Find::prune = 1;
|
|
+ }
|
|
+ $inotifier->watch ($File::Find::dir.'/'.$_, $inotifyflags) or die("WatchTree: watch creation failed (maybe increase the number of watches?)");
|
|
+ $globaldirs++;
|
|
+ logger(LOGDEBUG,"WatchTree: directory ". $globaldirs." ".$File::Find::dir.'/'.$_);
|
|
+ }
|
|
+ }
|
|
+ },
|
|
+ $tree
|
|
+ );
|
|
+ logstatsline($::statsdir,$globaldirs);
|
|
+}
|
|
+
|
|
+
|
|
+################################################################################
|
|
+# Main
|
|
+#
|
|
+logger(LOGINFO, 'Main: Starting $Id: csync2id.pl,v 1.18 2008/12/24 15:34:19 ard Exp $');
|
|
+# Start watching the directories
|
|
+logger(LOGINFO, "Main: traversing directories");
|
|
+eval {
|
|
+ watchtree($inotify,$_,IN_MOVE|IN_DELETE|IN_CLOSE_WRITE|IN_ATTRIB|IN_CREATE) foreach(@::dirs)
|
|
+};
|
|
+if($@) {
|
|
+ logger(LOGERR,"Main: $@");
|
|
+ exit(2);
|
|
+}
|
|
+logger(LOGINFO,"Main: ready for events");
|
|
+
|
|
+# Kill other daemon because we are ready
|
|
+if($daemonize) {
|
|
+ if ( -e $pidfile ) {
|
|
+ my $thepid;
|
|
+ @ARGV=($pidfile);
|
|
+ $thepid=<>;
|
|
+ logger(LOGINFO, "Main: about to kill previous incarnation $thepid");
|
|
+ kill(15,$thepid);
|
|
+ sleep 0.5;
|
|
+ }
|
|
+ rename($pidfileboot,$pidfile);
|
|
+}
|
|
+
|
|
+# Main loop
|
|
+$inotify->blocking(O_NONBLOCK);
|
|
+my $timeout=20;
|
|
+while () {
|
|
+ #my ($rhset,$dummy,$dummy,$timeleft)=IO::Select->select($selectset, undef, undef, 60);
|
|
+ my $nfound;
|
|
+ my $rin='';
|
|
+ vec($rin,$inotify->fileno,1)=1;
|
|
+ ($nfound,$timeout)=select($rin, undef, undef, $timeout);
|
|
+ logger(LOGDEBUG,"Main: nrfds: $nfound timeleft: $timeout\n");
|
|
+ if(!$timeout) {
|
|
+ $timeout=20;
|
|
+ logger(LOGDEBUG, "Main: timeout->check and update");
|
|
+ runchecker();
|
|
+ runupdater();
|
|
+ #
|
|
+ }
|
|
+ if($nfound>0) {
|
|
+ my @events = $inotify->read;
|
|
+ unless (@events > 0) {
|
|
+ logger(LOGWARN,"Main: Zero events, must be a something weird");
|
|
+ }
|
|
+ foreach(@events) {
|
|
+ if($_->IN_Q_OVERFLOW) {
|
|
+ logger(LOGERR,"Main: FATAL:inotify queue overflow: csync2id was to slow to handle events");
|
|
+ }
|
|
+ if( $_->IN_ISDIR) {
|
|
+ my $recurse=0;
|
|
+ # We want to recurse only for new, renamed or deleted directories
|
|
+ $recurse=$_->IN_DELETE||$_->IN_CREATE||$_->IN_MOVED_TO||$_->IN_MOVED_FROM;
|
|
+ eval watchtree($inotify,$_->fullname,IN_MOVE|IN_DELETE|IN_CLOSE_WRITE|IN_ATTRIB|IN_CREATE) if $_->IN_CREATE||$_->IN_MOVED_TO;
|
|
+ if($@) {
|
|
+ logger(LOGINFO,"$@");
|
|
+ exit(3);
|
|
+ }
|
|
+ push(@hintfifo,{ "filename" => $_->fullname , "recurse" => $recurse });
|
|
+ logger(LOGDEBUG,"Main: dir: ".$_->mask." ".$recurse." ".$_->fullname);
|
|
+ } else {
|
|
+ # Accumulate single file events:
|
|
+ next if(@hintfifo && $hintfifo[-1]->{"filename"} eq $_->fullname);
|
|
+ push(@hintfifo,{ "filename" => $_->fullname , "recurse" => 0 });
|
|
+ logger(LOGDEBUG,"Main: file: ".$_->mask," ".$_->fullname);
|
|
+ }
|
|
+ $globalevents++;
|
|
+ }
|
|
+ }
|
|
+ checkchildren();
|
|
+ givehints();
|
|
+ logstatsline($::statschanges,$globalevents);
|
|
+}
|
|
diff --git a/copycheck.sh b/copycheck.sh
|
|
new file mode 100755
|
|
index 0000000..d4fe7d5
|
|
--- /dev/null
|
|
+++ b/copycheck.sh
|
|
@@ -0,0 +1,37 @@
|
|
+#!/bin/bash
|
|
+
|
|
+errors=0
|
|
+ignrev="r364"
|
|
+
|
|
+check() {
|
|
+ if ! svn st $1 | grep -q '^?'; then
|
|
+ years="2003 2004 2005 2006 2007 2008"
|
|
+ for y in `svn log $1 | grep '^r[0-9]' | egrep -v "^($ignrev)" | sed 's,.* \(200.\)-.*,\1,' | sort -u`
|
|
+ do
|
|
+ years=`echo $years | sed "s,$y,,"`
|
|
+ if ! grep -q "\*.*Copyright.*$y" $1; then
|
|
+ echo "Missing $y in $1."
|
|
+ (( errors++ ))
|
|
+ fi
|
|
+ done
|
|
+ for y in $years
|
|
+ do
|
|
+ if grep -q "\*.*Copyright.*$y" $1; then
|
|
+ echo "Bogus $y in $1."
|
|
+ (( errors++ ))
|
|
+ fi
|
|
+ done
|
|
+ fi
|
|
+}
|
|
+
|
|
+for f in `grep -rl '\*.*Copyright' . | grep -v '/\.svn/'` ; do
|
|
+ check $f
|
|
+done
|
|
+
|
|
+if [ $errors -ne 0 ]; then
|
|
+ echo "Found $errors errors."
|
|
+ exit 1
|
|
+fi
|
|
+
|
|
+exit 0
|
|
+
|
|
diff --git a/csync2-postgres.sql b/csync2-postgres.sql
|
|
new file mode 100644
|
|
index 0000000..c8975c1
|
|
--- /dev/null
|
|
+++ b/csync2-postgres.sql
|
|
@@ -0,0 +1,56 @@
|
|
+--
|
|
+-- Table structure for table action
|
|
+--
|
|
+
|
|
+DROP TABLE IF EXISTS action;
|
|
+CREATE TABLE action (
|
|
+ filename varchar(255) DEFAULT NULL,
|
|
+ command text,
|
|
+ logfile text,
|
|
+ UNIQUE (filename,command)
|
|
+);
|
|
+
|
|
+--
|
|
+-- Table structure for table dirty
|
|
+--
|
|
+
|
|
+DROP TABLE IF EXISTS dirty;
|
|
+CREATE TABLE dirty (
|
|
+ filename varchar(200) DEFAULT NULL,
|
|
+ forced int DEFAULT NULL,
|
|
+ myname varchar(100) DEFAULT NULL,
|
|
+ peername varchar(100) DEFAULT NULL,
|
|
+ UNIQUE (filename,peername)
|
|
+);
|
|
+
|
|
+--
|
|
+-- Table structure for table file
|
|
+--
|
|
+
|
|
+DROP TABLE IF EXISTS file;
|
|
+CREATE TABLE file (
|
|
+ filename varchar(200) DEFAULT NULL,
|
|
+ checktxt varchar(200) DEFAULT NULL,
|
|
+ UNIQUE (filename)
|
|
+);
|
|
+
|
|
+--
|
|
+-- Table structure for table hint
|
|
+--
|
|
+
|
|
+DROP TABLE IF EXISTS hint;
|
|
+CREATE TABLE hint (
|
|
+ filename varchar(255) DEFAULT NULL,
|
|
+ recursive int DEFAULT NULL
|
|
+);
|
|
+
|
|
+--
|
|
+-- Table structure for table x509_cert
|
|
+--
|
|
+
|
|
+DROP TABLE IF EXISTS x509_cert;
|
|
+CREATE TABLE x509_cert (
|
|
+ peername varchar(255) DEFAULT NULL,
|
|
+ certdata varchar(255) DEFAULT NULL,
|
|
+ UNIQUE (peername)
|
|
+);
|
|
diff --git a/csync2.c b/csync2.c
|
|
index 88fefa2..889be05 100644
|
|
--- a/csync2.c
|
|
+++ b/csync2.c
|
|
@@ -36,18 +36,25 @@
|
|
#include <errno.h>
|
|
#include <signal.h>
|
|
#include <ctype.h>
|
|
+#include <syslog.h>
|
|
+#include "db_api.h"
|
|
+#include <netdb.h>
|
|
|
|
#ifdef REAL_DBDIR
|
|
# undef DBDIR
|
|
# define DBDIR REAL_DBDIR
|
|
#endif
|
|
|
|
-static char *file_database = 0;
|
|
+char *csync_database = 0;
|
|
+
|
|
+int db_type = DB_SQLITE3;
|
|
+
|
|
static char *file_config = 0;
|
|
static char *dbdir = DBDIR;
|
|
char *cfgname = "";
|
|
|
|
char myhostname[256] = "";
|
|
+char *csync_port = "30865";
|
|
char *active_grouplist = 0;
|
|
char *active_peerlist = 0;
|
|
|
|
@@ -57,11 +64,11 @@ extern FILE *yyin;
|
|
int csync_error_count = 0;
|
|
int csync_debug_level = 0;
|
|
FILE *csync_debug_out = 0;
|
|
+int csync_syslog = 0;
|
|
|
|
int csync_server_child_pid = 0;
|
|
int csync_timestamps = 0;
|
|
int csync_new_force = 0;
|
|
-int csync_port = 30865;
|
|
|
|
int csync_dump_dir_fd = -1;
|
|
|
|
@@ -93,6 +100,11 @@ void help(char *cmd)
|
|
PACKAGE_STRING " - cluster synchronization tool, 2nd generation\n"
|
|
"LINBIT Information Technologies GmbH <http://www.linbit.com>\n"
|
|
"Copyright (C) 2004, 2005 Clifford Wolf <clifford@clifford.at>\n"
|
|
+"Copyright (C) 2010 Dennis Schafroth <dennis@schafroth.com>\n"
|
|
+"Copyright (C) 2010 Johannes Thoma <johannes.thoma@gmx.at>\n"
|
|
+"\n"
|
|
+"Version: " CSYNC2_VERSION "\n"
|
|
+"\n"
|
|
"This program is free software under the terms of the GNU GPL.\n"
|
|
"\n"
|
|
"Usage: %s [-v..] [-C config-name] \\\n"
|
|
@@ -103,7 +115,7 @@ PACKAGE_STRING " - cluster synchronization tool, 2nd generation\n"
|
|
" -c [-r] file.. Check files and maybe add to dirty db\n"
|
|
" -u [-d] [-r] file.. Updates files if listed in dirty db\n"
|
|
" -o [-r] file.. Create list of files in compare-mode\n"
|
|
-" -f [-r] file.. Force this file in sync (resolve conflict)\n"
|
|
+" -f [-r] file.. Force files to win next conflict resolution\n"
|
|
" -m file.. Mark files in database as dirty\n"
|
|
"\n"
|
|
"Simple mode:\n"
|
|
@@ -161,11 +173,11 @@ PACKAGE_STRING " - cluster synchronization tool, 2nd generation\n"
|
|
" -U Don't mark all other peers as dirty when doing a -TI run.\n"
|
|
"\n"
|
|
" -G Group1,Group2,Group3,...\n"
|
|
-" Only use this groups from config-file.\n"
|
|
+" Only use these groups from config-file.\n"
|
|
"\n"
|
|
" -P peer1,peer1,...\n"
|
|
-" Only update this peers (still mark all as dirty).\n"
|
|
-" Only show files for this peers in -o (compare) mode.\n"
|
|
+" Only update these peers (still mark all as dirty).\n"
|
|
+" Only show files for these peers in -o (compare) mode.\n"
|
|
"\n"
|
|
" -F Add new entries to dirty database with force flag set.\n"
|
|
"\n"
|
|
@@ -178,6 +190,15 @@ PACKAGE_STRING " - cluster synchronization tool, 2nd generation\n"
|
|
" found to the specified file descriptor (when doing a -c run).\n"
|
|
" The directory names in this output are zero-terminated.\n"
|
|
"\n"
|
|
+"Database switches:\n"
|
|
+"\n"
|
|
+" -D database-dir\n"
|
|
+" Use sqlite database in database dir (default: /var/lib/csync2)\n"
|
|
+"\n"
|
|
+" -a mysql-url\n"
|
|
+" Use mysql database in URL:\n"
|
|
+" mysql://[<user>:<password>@]<hostname>/<database>\n"
|
|
+"\n"
|
|
"Creating key file:\n"
|
|
" %s -k filename\n"
|
|
"\n"
|
|
@@ -190,69 +211,124 @@ PACKAGE_STRING " - cluster synchronization tool, 2nd generation\n"
|
|
int create_keyfile(const char *filename)
|
|
{
|
|
int fd = open(filename, O_WRONLY|O_CREAT|O_EXCL, 0600);
|
|
- int rand = open("/dev/random", O_RDONLY);
|
|
+ int rand = open("/dev/urandom", O_RDONLY);
|
|
char matrix[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._";
|
|
unsigned char n;
|
|
int i;
|
|
-
|
|
+ int rc;
|
|
assert(sizeof(matrix) == 65);
|
|
if ( fd == -1 ) {
|
|
fprintf(stderr, "Can't create key file: %s\n", strerror(errno));
|
|
return 1;
|
|
}
|
|
if ( rand == -1 ) {
|
|
- fprintf(stderr, "Can't open /dev/random: %s\n", strerror(errno));
|
|
+ fprintf(stderr, "Can't open /dev/urandom: %s\n", strerror(errno));
|
|
return 1;
|
|
}
|
|
for (i=0; i<64; i++) {
|
|
- read(rand, &n, 1);
|
|
- write(fd, matrix+(n&63), 1);
|
|
+ rc = read(rand, &n, 1);
|
|
+ rc = write(fd, matrix+(n&63), 1);
|
|
}
|
|
- write(fd, "\n", 1);
|
|
+ rc = write(fd, "\n", 1);
|
|
close(rand);
|
|
close(fd);
|
|
return 0;
|
|
}
|
|
|
|
-static int csync_server_loop(int single_connect)
|
|
+static int csync_server_bind(void)
|
|
{
|
|
struct linger sl = { 1, 5 };
|
|
- struct sockaddr_in addr;
|
|
- int on = 1;
|
|
+ struct addrinfo hints;
|
|
+ struct addrinfo *result, *rp;
|
|
+ int save_errno;
|
|
+ int sfd, s, on = 1;
|
|
+ memset(&hints, 0, sizeof(struct addrinfo));
|
|
+ hints.ai_family = AF_UNSPEC; /* Allow IPv4 or IPv6 */
|
|
+ hints.ai_socktype = SOCK_STREAM;
|
|
+ hints.ai_flags = AI_PASSIVE;
|
|
+
|
|
+ s = getaddrinfo(NULL, csync_port, &hints, &result);
|
|
+ if (s != 0) {
|
|
+ csync_debug(1, "Cannot prepare local socket, getaddrinfo: %s\n", gai_strerror(s));
|
|
+ return -1;
|
|
+ }
|
|
|
|
- int listenfd = socket(AF_INET, SOCK_STREAM, 0);
|
|
- if (listenfd < 0) goto error;
|
|
+ /* getaddrinfo() returns a list of address structures.
|
|
+ Try each address until we successfully bind(2).
|
|
+ If socket(2) (or bind(2)) fails, we (close the socket
|
|
+ and) try the next address. */
|
|
+
|
|
+ for (rp = result; rp != NULL; rp = rp->ai_next) {
|
|
+ sfd = socket(rp->ai_family, rp->ai_socktype, rp->ai_protocol);
|
|
+ if (sfd == -1)
|
|
+ continue;
|
|
+
|
|
+ if (setsockopt(sfd, SOL_SOCKET, SO_REUSEADDR, &on, (socklen_t) sizeof(on)) < 0)
|
|
+ goto error;
|
|
+ if (setsockopt(sfd, SOL_SOCKET, SO_LINGER, &sl, (socklen_t) sizeof(sl)) < 0)
|
|
+ goto error;
|
|
+ if (setsockopt(sfd, IPPROTO_TCP, TCP_NODELAY, &on, (socklen_t) sizeof(on)) < 0)
|
|
+ goto error;
|
|
|
|
- bzero(&addr, sizeof(addr));
|
|
- addr.sin_family = AF_INET;
|
|
- addr.sin_addr.s_addr = htonl(INADDR_ANY);
|
|
- addr.sin_port = htons(csync_port);
|
|
+ if (bind(sfd, rp->ai_addr, rp->ai_addrlen) == 0)
|
|
+ break; /* Success */
|
|
+
|
|
+ close(sfd);
|
|
+ }
|
|
|
|
- if ( setsockopt(listenfd, SOL_SOCKET, SO_REUSEADDR, &on, (socklen_t) sizeof(on)) < 0 ) goto error;
|
|
- if ( setsockopt(listenfd, SOL_SOCKET, SO_LINGER, &sl, (socklen_t) sizeof(sl)) < 0 ) goto error;
|
|
- if ( setsockopt(listenfd, IPPROTO_TCP, TCP_NODELAY, &on, (socklen_t) sizeof(on)) < 0 ) goto error;
|
|
+ freeaddrinfo(result); /* No longer needed */
|
|
|
|
- if ( bind(listenfd, (struct sockaddr *) &addr, sizeof(addr)) < 0 ) goto error;
|
|
- if ( listen(listenfd, 5) < 0 ) goto error;
|
|
+ if (rp == NULL) /* No address succeeded */
|
|
+ return -1;
|
|
+
|
|
+ return sfd;
|
|
+
|
|
+error:
|
|
+ save_errno = errno;
|
|
+ close(sfd);
|
|
+ errno = save_errno;
|
|
+ return -1;
|
|
+}
|
|
|
|
+static int csync_server_loop(int single_connect)
|
|
+{
|
|
+ union {
|
|
+ struct sockaddr sa;
|
|
+ struct sockaddr_in sa_in;
|
|
+ struct sockaddr_in6 sa_in6;
|
|
+ struct sockaddr_storage ss;
|
|
+ } addr;
|
|
+ int listenfd = csync_server_bind();
|
|
+ if (listenfd < 0) goto error;
|
|
+
|
|
+ if (listen(listenfd, 5) < 0) goto error;
|
|
+
|
|
+	/* we want to "cleanly" shut down if the connection is lost unexpectedly */
|
|
signal(SIGPIPE, SIG_IGN);
|
|
+	/* the server is not interested in its children; prevent zombies */
|
|
signal(SIGCHLD, SIG_IGN);
|
|
|
|
printf("Csync2 daemon running. Waiting for connections.\n");
|
|
|
|
while (1) {
|
|
- int addrlen = sizeof(addr);
|
|
- int conn = accept(listenfd, (struct sockaddr *) &addr, &addrlen);
|
|
+ unsigned addrlen = sizeof(addr);
|
|
+ int conn = accept(listenfd, &addr.sa, &addrlen);
|
|
if (conn < 0) goto error;
|
|
|
|
fflush(stdout); fflush(stderr);
|
|
|
|
if (single_connect || !fork()) {
|
|
+ char hbuf[NI_MAXHOST], sbuf[NI_MAXSERV];
|
|
+ /* need to restore default SIGCHLD handler in the session,
|
|
+ * as we may need to wait on them in action.c */
|
|
+ signal(SIGCHLD, SIG_DFL);
|
|
csync_server_child_pid = getpid();
|
|
- fprintf(stderr, "<%d> New connection from %s:%u.\n",
|
|
- csync_server_child_pid,
|
|
- inet_ntoa(addr.sin_addr),
|
|
- ntohs(addr.sin_port));
|
|
+ if (getnameinfo(&addr.sa, addrlen,
|
|
+ hbuf, sizeof(hbuf), sbuf, sizeof(sbuf),
|
|
+ NI_NUMERICHOST | NI_NUMERICSERV) != 0)
|
|
+ goto error;
|
|
+ fprintf(stderr, "<%d> New connection from %s:%s.\n",
|
|
+ csync_server_child_pid, hbuf, sbuf);
|
|
fflush(stderr);
|
|
|
|
dup2(conn, 0);
|
|
@@ -293,8 +369,13 @@ int main(int argc, char ** argv)
|
|
return 1;
|
|
}
|
|
|
|
- while ( (opt = getopt(argc, argv, "W:s:Ftp:G:P:C:D:N:HBAIXULSTMRvhcuoimfxrd")) != -1 ) {
|
|
+ while ( (opt = getopt(argc, argv, "a:W:s:Ftp:G:P:C:D:N:HBAIXULlSTMRvhcuoimfxrd")) != -1 ) {
|
|
+
|
|
switch (opt) {
|
|
+ case 'a':
|
|
+ csync_database = optarg;
|
|
+ db_type = DB_MYSQL;
|
|
+ break;
|
|
case 'W':
|
|
csync_dump_dir_fd = atoi(optarg);
|
|
if (write(csync_dump_dir_fd, 0, 0) < 0)
|
|
@@ -314,7 +395,7 @@ int main(int argc, char ** argv)
|
|
csync_timestamps = 1;
|
|
break;
|
|
case 'p':
|
|
- csync_port = atoi(optarg);
|
|
+ csync_port = strdup(optarg);
|
|
break;
|
|
case 'G':
|
|
active_grouplist = optarg;
|
|
@@ -349,6 +430,10 @@ int main(int argc, char ** argv)
|
|
case 'v':
|
|
csync_debug_level++;
|
|
break;
|
|
+ case 'l':
|
|
+ csync_syslog = 1;
|
|
+ openlog("csync2", LOG_ODELAY, LOG_LOCAL0);
|
|
+ break;
|
|
case 'h':
|
|
if ( mode != MODE_NONE ) help(argv[0]);
|
|
mode = MODE_HINT;
|
|
@@ -450,6 +535,13 @@ int main(int argc, char ** argv)
|
|
if ( mode == MODE_NONE )
|
|
help(argv[0]);
|
|
|
|
+ /* Some inetd connect stderr to stdout. The debug level messages on
|
|
+ * stderr would confuse the csync2 protocol. Log to syslog instead. */
|
|
+ if ( mode == MODE_INETD && csync_debug_level && !csync_syslog ) {
|
|
+ csync_syslog = 1;
|
|
+ openlog("csync2", LOG_ODELAY, LOG_LOCAL0);
|
|
+ }
|
|
+
|
|
if ( *myhostname == 0 ) {
|
|
gethostname(myhostname, 256);
|
|
myhostname[255] = 0;
|
|
@@ -482,7 +574,7 @@ int main(int argc, char ** argv)
|
|
para = cmd ? strtok(0, "\t \r\n") : 0;
|
|
|
|
if (cmd && !strcasecmp(cmd, "ssl")) {
|
|
-#ifdef HAVE_LIBGNUTLS_OPENSSL
|
|
+#ifdef HAVE_LIBGNUTLS
|
|
conn_printf("OK (activating_ssl).\n");
|
|
conn_activate_ssl(1);
|
|
|
|
@@ -503,10 +595,8 @@ int main(int argc, char ** argv)
|
|
if (para)
|
|
cfgname = strdup(url_decode(para));
|
|
}
|
|
-
|
|
if ( !*cfgname ) {
|
|
- asprintf(&file_database, "%s/%s.db", dbdir, myhostname);
|
|
- asprintf(&file_config, ETCDIR "/csync2.cfg");
|
|
+ ASPRINTF(&file_config, ETCDIR "/csync2.cfg");
|
|
} else {
|
|
int i;
|
|
|
|
@@ -518,14 +608,10 @@ int main(int argc, char ** argv)
|
|
return mode != MODE_INETD;
|
|
}
|
|
|
|
- asprintf(&file_database, "%s/%s_%s.db", dbdir, myhostname, cfgname);
|
|
- asprintf(&file_config, ETCDIR "/csync2_%s.cfg", cfgname);
|
|
+ ASPRINTF(&file_config, ETCDIR "/csync2_%s.cfg", cfgname);
|
|
}
|
|
|
|
- csync_debug(2, "My hostname is %s.\n", myhostname);
|
|
- csync_debug(2, "Database-File: %s\n", file_database);
|
|
csync_debug(2, "Config-File: %s\n", file_config);
|
|
-
|
|
yyin = fopen(file_config, "r");
|
|
if ( !yyin )
|
|
csync_fatal("Can not open config file `%s': %s\n",
|
|
@@ -533,6 +619,12 @@ int main(int argc, char ** argv)
|
|
yyparse();
|
|
fclose(yyin);
|
|
|
|
+ if (!csync_database)
|
|
+ csync_database = db_default_database(dbdir, myhostname, cfgname);
|
|
+
|
|
+ csync_debug(2, "My hostname is %s.\n", myhostname);
|
|
+ csync_debug(2, "Database-File: %s\n", csync_database);
|
|
+
|
|
{
|
|
const struct csync_group *g;
|
|
for (g=csync_group; g; g=g->next)
|
|
@@ -541,7 +633,7 @@ int main(int argc, char ** argv)
|
|
found_a_group:;
|
|
}
|
|
|
|
- csync_db_open(file_database);
|
|
+ csync_db_open(csync_database);
|
|
|
|
for (i=optind; i < argc; i++)
|
|
on_cygwin_lowercase(argv[i]);
|
|
@@ -582,8 +674,8 @@ found_a_group:;
|
|
SQL_BEGIN("Check all hints",
|
|
"SELECT filename, recursive FROM hint")
|
|
{
|
|
- textlist_add(&tl, url_decode(SQL_V[0]),
|
|
- atoi(SQL_V[1]));
|
|
+ textlist_add(&tl, url_decode(SQL_V(0)),
|
|
+ atoi(SQL_V(1)));
|
|
} SQL_END;
|
|
|
|
for (t = tl; t != 0; t = t->next) {
|
|
@@ -642,51 +734,56 @@ found_a_group:;
|
|
case MODE_MARK:
|
|
for (i=optind; i < argc; i++) {
|
|
char *realname = getrealfn(argv[i]);
|
|
+ char *pfname;
|
|
csync_check_usefullness(realname, recursive);
|
|
- csync_mark(realname, 0, 0);
|
|
+ pfname=strdup(prefixencode(realname));
|
|
+ csync_mark(pfname, 0, 0);
|
|
|
|
if ( recursive ) {
|
|
char *where_rec = "";
|
|
|
|
if ( !strcmp(realname, "/") )
|
|
- asprintf(&where_rec, "or 1");
|
|
+ ASPRINTF(&where_rec, "or 1=1");
|
|
else
|
|
- asprintf(&where_rec, "or (filename > '%s/' "
|
|
- "and filename < '%s0')",
|
|
- url_encode(realname), url_encode(realname));
|
|
+ ASPRINTF(&where_rec, "UNION ALL SELECT filename from file where filename > '%s/' "
|
|
+ "and filename < '%s0'",
|
|
+ url_encode(pfname), url_encode(pfname));
|
|
|
|
SQL_BEGIN("Adding dirty entries recursively",
|
|
"SELECT filename FROM file WHERE filename = '%s' %s",
|
|
- url_encode(realname), where_rec)
|
|
+ url_encode(pfname), where_rec)
|
|
{
|
|
- char *filename = strdup(url_encode(SQL_V[0]));
|
|
+ char *filename = strdup(url_decode(SQL_V(0)));
|
|
csync_mark(filename, 0, 0);
|
|
free(filename);
|
|
} SQL_END;
|
|
}
|
|
+ free(pfname);
|
|
}
|
|
break;
|
|
|
|
case MODE_FORCE:
|
|
for (i=optind; i < argc; i++) {
|
|
char *realname = getrealfn(argv[i]);
|
|
+ char *pfname = strdup(prefixencode(realname));
|
|
char *where_rec = "";
|
|
|
|
if ( recursive ) {
|
|
if ( !strcmp(realname, "/") )
|
|
- asprintf(&where_rec, "or 1");
|
|
+ ASPRINTF(&where_rec, "or 1=1");
|
|
else
|
|
- asprintf(&where_rec, "or (filename > '%s/' "
|
|
+ ASPRINTF(&where_rec, "or (filename > '%s/' "
|
|
"and filename < '%s0')",
|
|
url_encode(realname), url_encode(realname));
|
|
}
|
|
|
|
SQL("Mark file as to be forced",
|
|
- "UPDATE dirty SET force = 1 WHERE filename = '%s' %s",
|
|
+ "UPDATE dirty SET forced = 1 WHERE filename = '%s' %s",
|
|
url_encode(realname), where_rec);
|
|
|
|
if ( recursive )
|
|
free(where_rec);
|
|
+ free(pfname);
|
|
}
|
|
break;
|
|
|
|
@@ -695,7 +792,7 @@ found_a_group:;
|
|
SQL_BEGIN("DB Dump - Hint",
|
|
"SELECT recursive, filename FROM hint ORDER BY filename")
|
|
{
|
|
- printf("%s\t%s\n", SQL_V[0], url_decode(SQL_V[1]));
|
|
+ printf("%s\t%s\n", (char*)SQL_V(0), url_decode(SQL_V(1)));
|
|
retval = -1;
|
|
} SQL_END;
|
|
break;
|
|
@@ -705,8 +802,8 @@ found_a_group:;
|
|
SQL_BEGIN("DB Dump - File",
|
|
"SELECT checktxt, filename FROM file ORDER BY filename")
|
|
{
|
|
- if (csync_find_next(0, url_decode(SQL_V[1]))) {
|
|
- printf("%s\t%s\n", url_decode(SQL_V[0]), url_decode(SQL_V[1]));
|
|
+ if (csync_find_next(0, url_decode(SQL_V(1)))) {
|
|
+ printf("%s\t%s\n", url_decode(SQL_V(0)), url_decode(SQL_V(1)));
|
|
retval = -1;
|
|
}
|
|
} SQL_END;
|
|
@@ -717,8 +814,8 @@ found_a_group:;
|
|
SQL_BEGIN("DB Dump - File",
|
|
"SELECT checktxt, filename FROM file ORDER BY filename")
|
|
{
|
|
- if ( csync_match_file_host(url_decode(SQL_V[1]), argv[optind], argv[optind+1], 0) ) {
|
|
- printf("%s\t%s\n", url_decode(SQL_V[0]), url_decode(SQL_V[1]));
|
|
+ if ( csync_match_file_host(url_decode(SQL_V(1)), argv[optind], argv[optind+1], 0) ) {
|
|
+ printf("%s\t%s\n", url_decode(SQL_V(0)), url_decode(SQL_V(1)));
|
|
retval = -1;
|
|
}
|
|
} SQL_END;
|
|
@@ -767,11 +864,11 @@ found_a_group:;
|
|
case MODE_LIST_DIRTY:
|
|
retval = 2;
|
|
SQL_BEGIN("DB Dump - Dirty",
|
|
- "SELECT force, myname, peername, filename FROM dirty ORDER BY filename")
|
|
+ "SELECT forced, myname, peername, filename FROM dirty ORDER BY filename")
|
|
{
|
|
- if (csync_find_next(0, url_decode(SQL_V[3]))) {
|
|
- printf("%s\t%s\t%s\t%s\n", atoi(SQL_V[0]) ? "force" : "chary",
|
|
- url_decode(SQL_V[1]), url_decode(SQL_V[2]), url_decode(SQL_V[3]));
|
|
+ if (csync_find_next(0, url_decode(SQL_V(3)))) {
|
|
+ printf("%s\t%s\t%s\t%s\n", atoi(SQL_V(0)) ? "force" : "chary",
|
|
+ url_decode(SQL_V(1)), url_decode(SQL_V(2)), url_decode(SQL_V(3)));
|
|
retval = -1;
|
|
}
|
|
} SQL_END;
|
|
diff --git a/csync2.cfg b/csync2.cfg
|
|
index 338bb7b..ff9e639 100644
|
|
--- a/csync2.cfg
|
|
+++ b/csync2.cfg
|
|
@@ -1,4 +1,3 @@
|
|
-
|
|
# Csync2 Example Configuration File
|
|
# ---------------------------------
|
|
#
|
|
@@ -12,6 +11,22 @@
|
|
#
|
|
# key /etc/csync2.key_mygroup;
|
|
#
|
|
+# #
|
|
+# # WARNING:
|
|
+# # You CANNOT use paths containing a symlink
|
|
+# # component in include/exclude options!
|
|
+# #
|
|
+# # Here is a real-life example:
|
|
+# # Suppose you have some 64bit Linux systems
|
|
+# # and /usr/lib/ocf is what you want to keep
|
|
+# # in sync. On 64bit Linux systems, /usr/lib
|
|
+# # is usually a symlink to /usr/lib64.
|
|
+# # This does not work:
|
|
+# # include /usr/lib/ocf;
|
|
+# # But this does work:
|
|
+# # include /usr/lib64/ocf;
|
|
+# #
|
|
+#
|
|
# include /etc/apache;
|
|
# include %homedir%/bob;
|
|
# exclude %homedir%/bob/temp;
|
|
@@ -24,8 +39,12 @@
|
|
# exec "/usr/sbin/apache2ctl graceful";
|
|
# logfile "/var/log/csync2_action.log";
|
|
# do-local;
|
|
+# # you can use do-local-only if the execution
|
|
+# # should be done locally only
|
|
+# # do-local-only;
|
|
# }
|
|
#
|
|
+# # The backup-directory needs to be created first!
|
|
# backup-directory /var/backups/csync2;
|
|
# backup-generations 3;
|
|
#
|
|
@@ -37,4 +56,3 @@
|
|
# on host[12]: /export/users;
|
|
# on *: /home;
|
|
# }
|
|
-
|
|
diff --git a/csync2.h b/csync2.h
|
|
index 1306023..d76f880 100644
|
|
--- a/csync2.h
|
|
+++ b/csync2.h
|
|
@@ -21,7 +21,11 @@
|
|
#ifndef CSYNC2_H
|
|
#define CSYNC2_H 1
|
|
|
|
+#define CSYNC2_VERSION "2.0-rc1"
|
|
+
|
|
+#ifndef _GNU_SOURCE
|
|
#define _GNU_SOURCE
|
|
+#endif
|
|
|
|
#include "config.h"
|
|
#include <stdio.h>
|
|
@@ -31,6 +35,24 @@
|
|
#include <errno.h>
|
|
|
|
|
|
+#define DB_SCHEMA_VERSION 0
|
|
+
|
|
+/* asprintf with test for no memory */
|
|
+
|
|
+#define ASPRINTF(s, fmt, ...) do {\
|
|
+ int __ret = asprintf(s, fmt, ##__VA_ARGS__);\
|
|
+ if (__ret < 0) \
|
|
+ csync_fatal("Out of memory in asprintf at %s:%d\n", __FILE__, __LINE__);\
|
|
+} while (0)
|
|
+
|
|
+
|
|
+#define VASPRINTF(s, fmt, args) do {\
|
|
+ int __ret = vasprintf(s, fmt, args);\
|
|
+ if (__ret < 0) \
|
|
+ csync_fatal("Out of memory in vasprintf at %s:%d\n", __FILE__, __LINE__);\
|
|
+} while (0)
|
|
+
|
|
+
|
|
/* action.c */
|
|
|
|
extern void csync_schedule_commands(const char *filename, int islocal);
|
|
@@ -78,7 +100,7 @@ extern int conn_write(const void *buf, size_t count);
|
|
|
|
extern void conn_printf(const char *fmt, ...);
|
|
extern int conn_fgets(char *s, int size);
|
|
-extern int conn_gets(char *s, int size);
|
|
+extern size_t conn_gets(char *s, size_t size);
|
|
|
|
|
|
/* db.c */
|
|
@@ -91,26 +113,56 @@ extern void* csync_db_begin(const char *err, const char *fmt, ...);
|
|
extern int csync_db_next(void *vmx, const char *err,
|
|
int *pN, const char ***pazValue, const char ***pazColName);
|
|
extern void csync_db_fin(void *vmx, const char *err);
|
|
+extern const void * csync_db_colblob(void *stmtx,int col);
|
|
+extern char *db_default_database(char *dbdir, char *myhostname, char *cfg_name);
|
|
+
|
|
|
|
#define SQL(e, s, ...) csync_db_sql(e, s, ##__VA_ARGS__)
|
|
|
|
+#if 0
|
|
+#if defined(HAVE_LIBSQLITE)
|
|
#define SQL_BEGIN(e, s, ...) \
|
|
{ \
|
|
char *SQL_ERR = e; \
|
|
void *SQL_VM = csync_db_begin(SQL_ERR, s, ##__VA_ARGS__); \
|
|
int SQL_COUNT = 0; \
|
|
while (1) { \
|
|
- const char **SQL_V, **SQL_N; \
|
|
+ const char **dataSQL_V, **dataSQL_N; \
|
|
int SQL_C; \
|
|
if ( !csync_db_next(SQL_VM, SQL_ERR, \
|
|
- &SQL_C, &SQL_V, &SQL_N) ) break; \
|
|
+ &SQL_C, &dataSQL_V, &dataSQL_N) ) break; \
|
|
SQL_COUNT++;
|
|
|
|
+#define SQL_V(col) \
|
|
+ (dataSQL_V[(col)])
|
|
+#endif
|
|
+#endif
|
|
+
|
|
+// #if defined(HAVE_LIBSQLITE3)
|
|
+
|
|
+#define SQL_BEGIN(e, s, ...) \
|
|
+{ \
|
|
+ char *SQL_ERR = e; \
|
|
+ void *SQL_VM = csync_db_begin(SQL_ERR, s, ##__VA_ARGS__); \
|
|
+ int SQL_COUNT = 0; \
|
|
+\
|
|
+ if (SQL_VM) { \
|
|
+ while (1) { \
|
|
+ const char **dataSQL_V, **dataSQL_N; \
|
|
+ int SQL_C; \
|
|
+ if ( !csync_db_next(SQL_VM, SQL_ERR, \
|
|
+ &SQL_C, &dataSQL_V, &dataSQL_N) ) break; \
|
|
+ SQL_COUNT++;
|
|
+
|
|
+#define SQL_V(col) \
|
|
+ (csync_db_colblob(SQL_VM,(col)))
|
|
+// #endif
|
|
#define SQL_FIN }{
|
|
|
|
#define SQL_END \
|
|
+ } \
|
|
+ csync_db_fin(SQL_VM, SQL_ERR); \
|
|
} \
|
|
- csync_db_fin(SQL_VM, SQL_ERR); \
|
|
}
|
|
|
|
extern int db_blocking_mode;
|
|
@@ -150,6 +202,7 @@ extern void csync_remove_old();
|
|
/* daemon.c */
|
|
|
|
extern void csync_daemon_session();
|
|
+extern int csync_copy_file(int fd_in, int fd_out);
|
|
|
|
|
|
/* getrealfn.c */
|
|
@@ -170,6 +223,7 @@ const char *url_decode(const char *in);
|
|
|
|
/* another ringbuffer here. so use it with care!! */
|
|
const char *prefixsubst(const char *in);
|
|
+const char *prefixencode(const char *filename);
|
|
|
|
|
|
/* textlist implementation */
|
|
@@ -233,12 +287,13 @@ struct csync_group_host {
|
|
|
|
struct csync_group_pattern {
|
|
struct csync_group_pattern *next;
|
|
- int isinclude, iscompare;
|
|
+ int isinclude, iscompare, star_matches_slashes;
|
|
const char *pattern;
|
|
};
|
|
|
|
struct csync_group_action_pattern {
|
|
struct csync_group_action_pattern *next;
|
|
+ int star_matches_slashes;
|
|
const char *pattern;
|
|
};
|
|
|
|
@@ -253,6 +308,7 @@ struct csync_group_action {
|
|
struct csync_group_action_command *command;
|
|
const char *logfile;
|
|
int do_local;
|
|
+ int do_local_only;
|
|
};
|
|
|
|
struct csync_group {
|
|
@@ -301,8 +357,14 @@ extern struct csync_group *csync_group;
|
|
extern struct csync_prefix *csync_prefix;
|
|
extern struct csync_nossl *csync_nossl;
|
|
|
|
+extern unsigned csync_lock_timeout;
|
|
+extern char *csync_tempdir;
|
|
+
|
|
+extern char *csync_database;
|
|
+
|
|
extern int csync_error_count;
|
|
extern int csync_debug_level;
|
|
+extern int csync_syslog;
|
|
extern FILE *csync_debug_out;
|
|
|
|
extern long csync_last_printtime;
|
|
@@ -312,9 +374,9 @@ extern int csync_messages_printed;
|
|
extern int csync_server_child_pid;
|
|
extern int csync_timestamps;
|
|
extern int csync_new_force;
|
|
-extern int csync_port;
|
|
|
|
extern char myhostname[];
|
|
+extern char *csync_port;
|
|
extern char *active_grouplist;
|
|
extern char *active_peerlist;
|
|
|
|
@@ -328,7 +390,7 @@ extern int csync_dump_dir_fd;
|
|
|
|
extern int csync_compare_mode;
|
|
|
|
-#ifdef HAVE_LIBGNUTLS_OPENSSL
|
|
+#ifdef HAVE_LIBGNUTLS
|
|
extern int csync_conn_usessl;
|
|
#endif
|
|
|
|
diff --git a/csync2.spec b/csync2.spec
|
|
index 17daad6..5d342dc 100644
|
|
--- a/csync2.spec
|
|
+++ b/csync2.spec
|
|
@@ -23,15 +23,15 @@
|
|
# norootforbuild
|
|
# neededforbuild openssl openssl-devel
|
|
|
|
-BuildRequires: sqlite-devel sqlite librsync openssl-devel librsync-devel
|
|
+BuildRequires: sqlite-devel sqlite librsync gnutls-devel librsync-devel
|
|
|
|
Name: csync2
|
|
License: GPL
|
|
Group: System/Monitoring
|
|
Requires: sqlite openssl librsync
|
|
Autoreqprov: on
|
|
-Version: 1.34
|
|
-Release: 1
|
|
+Version: 2.0
|
|
+Release: 0.1.rc1
|
|
Source0: csync2-%{version}.tar.gz
|
|
URL: http://oss.linbit.com/csync2
|
|
BuildRoot: %{_tmppath}/%{name}-%{version}-build
|
|
@@ -83,6 +83,7 @@ fi
|
|
%defattr(-,root,root)
|
|
%doc ChangeLog README NEWS INSTALL TODO AUTHORS
|
|
%{_sbindir}/csync2
|
|
+%{_sbindir}/csync2-compare
|
|
%{_var}/lib/csync2
|
|
%{_mandir}/man1/csync2.1.gz
|
|
%config(noreplace) %{_sysconfdir}/xinetd.d/csync2
|
|
diff --git a/daemon.c b/daemon.c
|
|
index a6357fa..2c054ed 100644
|
|
--- a/daemon.c
|
|
+++ b/daemon.c
|
|
@@ -23,6 +23,7 @@
|
|
#include <sys/stat.h>
|
|
#include <sys/socket.h>
|
|
#include <netinet/in.h>
|
|
+#include <arpa/inet.h>
|
|
#include <string.h>
|
|
#include <fnmatch.h>
|
|
#include <stdlib.h>
|
|
@@ -38,6 +39,9 @@
|
|
|
|
static char *cmd_error;
|
|
|
|
+int csync_setBackupFileStatus(char *filename, int backupDirLength);
|
|
+
|
|
+
|
|
int csync_unlink(const char *filename, int ign)
|
|
{
|
|
struct stat st;
|
|
@@ -80,8 +84,13 @@ void csync_file_update(const char *filename, const char *peername)
|
|
url_encode(filename));
|
|
} else {
|
|
const char *checktxt = csync_genchecktxt(&st, filename, 0);
|
|
+
|
|
+ SQL("Deleting old record from file db",
|
|
+ "DELETE FROM file WHERE filename = '%s'",
|
|
+ url_encode(filename));
|
|
+
|
|
SQL("Insert record to file db",
|
|
- "insert into file (filename, checktxt) values "
|
|
+ "INSERT INTO file (filename, checktxt) values "
|
|
"('%s', '%s')", url_encode(filename),
|
|
url_encode(checktxt));
|
|
}
|
|
@@ -98,73 +107,164 @@ int csync_file_backup(const char *filename)
|
|
{
|
|
static char error_buffer[1024];
|
|
const struct csync_group *g = NULL;
|
|
+ struct stat buf;
|
|
+ int rc;
|
|
while ( (g=csync_find_next(g, filename)) ) {
|
|
- if (g->backup_directory && g->backup_generations > 0) {
|
|
- int bak_dir_len = strlen(g->backup_directory);
|
|
- int filename_len = strlen(filename);
|
|
- char backup_filename[bak_dir_len + filename_len + 10];
|
|
- char backup_otherfilename[bak_dir_len + filename_len + 10];
|
|
- int fd_in, fd_out, i;
|
|
-
|
|
- fd_in = open(filename, O_RDONLY);
|
|
- if (fd_in < 0) return 0;
|
|
-
|
|
- memcpy(backup_filename, g->backup_directory, bak_dir_len);
|
|
- for (i=0; i<filename_len; i++)
|
|
- backup_filename[bak_dir_len+i] =
|
|
- filename[i] == '/' ? '_' : filename[i];
|
|
- backup_filename[bak_dir_len] = '/';
|
|
- memcpy(backup_otherfilename, backup_filename,
|
|
- bak_dir_len + filename_len);
|
|
-
|
|
- for (i=g->backup_generations-1; i; i--) {
|
|
- snprintf(backup_filename+bak_dir_len+filename_len, 10, ".%d", i-1);
|
|
- snprintf(backup_otherfilename+bak_dir_len+filename_len, 10, ".%d", i);
|
|
- rename(backup_filename, backup_otherfilename);
|
|
- }
|
|
-
|
|
- strcpy(backup_filename+bak_dir_len+filename_len, ".0");
|
|
- fd_out = open(backup_filename, O_WRONLY|O_CREAT, 0600);
|
|
-
|
|
- if (fd_out < 0) {
|
|
- snprintf(error_buffer, 1024,
|
|
- "Open error while backing up '%s': %s\n",
|
|
- filename, strerror(errno));
|
|
- cmd_error = error_buffer;
|
|
- close(fd_in);
|
|
- return 1;
|
|
- }
|
|
-
|
|
- while (1) {
|
|
- char buffer[512];
|
|
- int read_len = read(fd_in, buffer, 512);
|
|
- int write_len = 0;
|
|
-
|
|
- if (read_len <= 0)
|
|
- break;
|
|
-
|
|
- while (write_len < read_len) {
|
|
- int rc = write(fd_out, buffer+write_len, read_len-write_len);
|
|
- if (rc <= 0) {
|
|
- snprintf(error_buffer, 1024,
|
|
- "Write error while backing up '%s': %s\n",
|
|
- filename, strerror(errno));
|
|
- cmd_error = error_buffer;
|
|
- close(fd_in);
|
|
- close(fd_out);
|
|
- return 1;
|
|
- }
|
|
- write_len += rc;
|
|
- }
|
|
- }
|
|
- close(fd_in);
|
|
- close(fd_out);
|
|
- }
|
|
+ if (g->backup_directory && g->backup_generations > 1) {
|
|
+
|
|
+ int bak_dir_len = strlen(g->backup_directory);
|
|
+ int filename_len = strlen(filename);
|
|
+ char backup_filename[bak_dir_len + filename_len + 10];
|
|
+ char backup_otherfilename[bak_dir_len + filename_len + 10];
|
|
+ int fd_in, fd_out, i;
|
|
+ int lastSlash = 0;
|
|
+ mode_t mode;
|
|
+ csync_debug(1, "backup\n");
|
|
+ // Skip generation of directories
|
|
+ rc = stat(filename, &buf);
|
|
+ if (S_ISDIR(buf.st_mode)) {
|
|
+ csync_debug(1, "directory. Skip generation \n");
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ fd_in = open(filename, O_RDONLY);
|
|
+ if (fd_in < 0)
|
|
+ return 0;
|
|
+
|
|
+ memcpy(backup_filename, g->backup_directory, bak_dir_len);
|
|
+ backup_filename[bak_dir_len] = 0;
|
|
+ mode = 0777;
|
|
+
|
|
+
|
|
+ for (i=filename_len; i> 0; i--)
|
|
+ if (filename[i] == '/') {
|
|
+ lastSlash = i;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ for (i=0; i < filename_len; i++) {
|
|
+ // Create directories in filename
|
|
+ // TODO: Get the mode from the orig. dir
|
|
+ if (filename[i] == '/' && i <= lastSlash) {
|
|
+
|
|
+ backup_filename[bak_dir_len+i] = 0;
|
|
+
|
|
+ csync_debug(1, "mkdir %s \n", backup_filename);
|
|
+
|
|
+ mkdir(backup_filename, mode);
|
|
+ // Dont check the empty string.
|
|
+ if (i!= 0)
|
|
+ csync_setBackupFileStatus(backup_filename, bak_dir_len);
|
|
+
|
|
+ }
|
|
+ backup_filename[bak_dir_len+i] = filename[i];
|
|
+ }
|
|
+
|
|
+ backup_filename[bak_dir_len + filename_len] = 0;
|
|
+ backup_filename[bak_dir_len] = '/';
|
|
+ memcpy(backup_otherfilename, backup_filename,
|
|
+ bak_dir_len + filename_len);
|
|
+
|
|
+ //rc = unlink(
|
|
+ for (i=g->backup_generations-1; i; i--) {
|
|
+
|
|
+ if (i != 1)
|
|
+ snprintf(backup_filename+bak_dir_len+filename_len, 10, ".%d", i-1);
|
|
+ backup_filename[bak_dir_len+filename_len] = '\0';
|
|
+ snprintf(backup_otherfilename+bak_dir_len+filename_len, 10, ".%d", i);
|
|
+
|
|
+ rc = rename(backup_filename, backup_otherfilename);
|
|
+ csync_debug(1, "renaming backup files '%s' to '%s'. rc = %d\n", backup_filename, backup_otherfilename, rc);
|
|
+
|
|
+ }
|
|
+
|
|
+ /* strcpy(backup_filename+bak_dir_len+filename_len, ""); */
|
|
+
|
|
+ fd_out = open(backup_filename, O_WRONLY|O_CREAT, 0600);
|
|
+
|
|
+ if (fd_out < 0) {
|
|
+ snprintf(error_buffer, 1024,
|
|
+ "Open error while backing up '%s': %s\n",
|
|
+ filename, strerror(errno));
|
|
+ cmd_error = error_buffer;
|
|
+ close(fd_in);
|
|
+ return 1;
|
|
+ }
|
|
+
|
|
+ csync_debug(1,"Copying data from %s to backup file %s \n", filename, backup_filename);
|
|
+
|
|
+ rc = csync_copy_file(fd_in, fd_out);
|
|
+ if (rc != 0) {
|
|
+ csync_debug(1, "csync_backup error 2\n");
|
|
+
|
|
+ snprintf(error_buffer, 1024,
|
|
+ "Write error while backing up '%s': %s\n",
|
|
+ filename, strerror(errno));
|
|
+
|
|
+ cmd_error = error_buffer;
|
|
+			// TODO verify file disappeared?
|
|
+ //
|
|
+ // return 1;
|
|
+ }
|
|
+ csync_setBackupFileStatus(backup_filename, bak_dir_len);
|
|
+ csync_debug(1, "csync_backup loop end\n");
|
|
+ }
|
|
}
|
|
-
|
|
+ csync_debug(1, "csync_backup end\n");
|
|
return 0;
|
|
}
|
|
|
|
+int csync_copy_file(int fd_in, int fd_out)
|
|
+{
|
|
+ char buffer[512];
|
|
+ int read_len = read(fd_in, buffer, 512);
|
|
+
|
|
+ while (read_len > 0) {
|
|
+ int write_len = 0;
|
|
+
|
|
+ while (write_len < read_len) {
|
|
+ int rc = write(fd_out, buffer+write_len, read_len-write_len);
|
|
+ if (rc == -1) {
|
|
+ close(fd_in);
|
|
+ close(fd_out);
|
|
+ //TODO verify return code.
|
|
+ return errno;
|
|
+ }
|
|
+ write_len += rc;
|
|
+ }
|
|
+ read_len = read(fd_in, buffer, 512);
|
|
+ }
|
|
+ close(fd_in);
|
|
+ close(fd_out);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/* Get the mode and ownership from the original directory.
|
|
+ Looking from the back_dir_len should produce the original dir.
|
|
+*/
|
|
+int csync_setBackupFileStatus(char *filename, int backupDirLength) {
|
|
+
|
|
+ struct stat buf;
|
|
+ int rc = stat((filename + backupDirLength), &buf);
|
|
+ if (rc == 0 ) {
|
|
+ csync_debug(0, "Stating original file %s rc: %d mode: %o", (filename + backupDirLength), rc, buf.st_mode);
|
|
+
|
|
+ rc = chown(filename, buf.st_uid, buf.st_gid);
|
|
+ csync_debug(0, "Changing owner of %s to user %d and group %d, rc= %d \n",
|
|
+ filename, buf.st_uid, buf.st_gid, rc);
|
|
+
|
|
+ rc = chmod(filename, buf.st_mode);
|
|
+ csync_debug(0, "Changing mode of %s to mode %d, rc= %d \n",
|
|
+ filename, buf.st_mode, rc);
|
|
+
|
|
+ }
|
|
+ else {
|
|
+		csync_debug(0, "Error getting mode and ownership from %s \n", (filename + backupDirLength));
|
|
+ return -1;
|
|
+ }
|
|
+ return 0;
|
|
+};
|
|
+
|
|
struct csync_command {
|
|
char *text;
|
|
int check_perm;
|
|
@@ -210,16 +310,149 @@ struct csync_command cmdtab[] = {
|
|
{ 0, 0, 0, 0, 0, 0, 0 }
|
|
};
|
|
|
|
+typedef union address {
|
|
+ struct sockaddr sa;
|
|
+ struct sockaddr_in sa_in;
|
|
+ struct sockaddr_in6 sa_in6;
|
|
+ struct sockaddr_storage ss;
|
|
+} address_t;
|
|
+
|
|
+const char *csync_inet_ntop(address_t *addr)
|
|
+{
|
|
+ char buf[INET6_ADDRSTRLEN];
|
|
+ sa_family_t af = addr->sa.sa_family;
|
|
+ return inet_ntop(af,
|
|
+ af == AF_INET ? (void*)&addr->sa_in.sin_addr :
|
|
+ af == AF_INET6 ? (void*)&addr->sa_in6.sin6_addr : NULL,
|
|
+ buf, sizeof(buf));
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Loops (to cater for multihomed peers) through the address list returned by
|
|
+ * getaddrinfo(), and returns 1 if any of them matches the address obtained from
|
|
+ * getpeername() during session startup.
|
|
+ * Otherwise returns 0 (-> identification failed).
|
|
+ *
|
|
+ * TODO switch to a getnameinfo in conn_open.
|
|
+ * TODO add a "pre-authenticated" pipe mode for use over ssh */
|
|
+int verify_peername(const char *name, address_t *peeraddr)
|
|
+{
|
|
+ sa_family_t af = peeraddr->sa.sa_family;
|
|
+ struct addrinfo hints;
|
|
+ struct addrinfo *result, *rp;
|
|
+ int try_mapped_ipv4;
|
|
+ int s;
|
|
+
|
|
+ /* Obtain address(es) matching host */
|
|
+ memset(&hints, 0, sizeof(struct addrinfo));
|
|
+ hints.ai_family = AF_UNSPEC; /* Allow IPv4 or IPv6 */
|
|
+	hints.ai_socktype = SOCK_STREAM; /* Stream socket */
|
|
+
|
|
+ s = getaddrinfo(name, NULL, &hints, &result);
|
|
+ if (s != 0) {
|
|
+ csync_debug(1, "getaddrinfo: %s\n", gai_strerror(s));
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ try_mapped_ipv4 =
|
|
+ af == AF_INET6 &&
|
|
+ !memcmp(&peeraddr->sa_in6.sin6_addr,
|
|
+ "\0\0\0\0" "\0\0\0\0" "\0\0\xff\xff", 12);
|
|
+
|
|
+ /* getaddrinfo() returns a list of address structures.
|
|
+ * Try each address. */
|
|
+
|
|
+ for (rp = result; rp != NULL; rp = rp->ai_next) {
|
|
+ /* both IPv4 */
|
|
+ if (af == AF_INET && rp->ai_family == AF_INET &&
|
|
+ !memcmp(&((struct sockaddr_in*)rp->ai_addr)->sin_addr,
|
|
+ &peeraddr->sa_in.sin_addr, sizeof(struct in_addr)))
|
|
+ break;
|
|
+ /* both IPv6 */
|
|
+ if (af == AF_INET6 && rp->ai_family == AF_INET6 &&
|
|
+ !memcmp(&((struct sockaddr_in6*)rp->ai_addr)->sin6_addr,
|
|
+ &peeraddr->sa_in6.sin6_addr, sizeof(struct in6_addr)))
|
|
+ break;
|
|
+ /* peeraddr IPv6, but actually ::ffff:I.P.v.4,
|
|
+ * and forward lookup returned IPv4 only */
|
|
+ if (af == AF_INET6 && rp->ai_family == AF_INET &&
|
|
+ try_mapped_ipv4 &&
|
|
+ !memcmp(&((struct sockaddr_in*)rp->ai_addr)->sin_addr,
|
|
+ (unsigned char*)&peeraddr->sa_in6.sin6_addr + 12,
|
|
+ sizeof(struct in_addr)))
|
|
+ break;
|
|
+ }
|
|
+ freeaddrinfo(result);
|
|
+ if (rp != NULL) /* memcmp found a match */
|
|
+ return conn_check_peer_cert(name, 0);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/* Why do all this fuzz, and not simply --assume-authenticated?
|
|
+ * To limit the impact of an accidental misconfiguration.
|
|
+ */
|
|
+void set_peername_from_env(address_t *p, const char *env)
|
|
+{
|
|
+ struct addrinfo hints = {
|
|
+ .ai_family = AF_UNSPEC,
|
|
+ .ai_socktype = SOCK_STREAM,
|
|
+ .ai_flags = AI_NUMERICHOST | AI_NUMERICSERV,
|
|
+ };
|
|
+ struct addrinfo *result;
|
|
+ char *c;
|
|
+ int s;
|
|
+
|
|
+ char *val = getenv(env);
|
|
+ csync_debug(3, "getenv(%s): >>%s<<\n", env, val ?: "");
|
|
+ if (!val)
|
|
+ return;
|
|
+ val = strdup(val);
|
|
+ if (!val)
|
|
+ return;
|
|
+
|
|
+ c = strchr(val, ' ');
|
|
+ if (!c)
|
|
+ return;
|
|
+ *c = '\0';
|
|
+
|
|
+ s = getaddrinfo(val, NULL, &hints, &result);
|
|
+ if (s != 0) {
|
|
+ csync_debug(1, "getaddrinfo: %s\n", gai_strerror(s));
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /* getaddrinfo() may return a list of address structures.
|
|
+ * Use the first one. */
|
|
+ if (result)
|
|
+ memcpy(p, result->ai_addr, result->ai_addrlen);
|
|
+ freeaddrinfo(result);
|
|
+}
|
|
+
|
|
void csync_daemon_session()
|
|
{
|
|
- struct sockaddr_in peername;
|
|
- struct hostent *hp;
|
|
- int peerlen = sizeof(struct sockaddr_in);
|
|
+ struct stat sb;
|
|
+ address_t peername = { .sa.sa_family = AF_UNSPEC, };
|
|
+ socklen_t peerlen = sizeof(peername);
|
|
char line[4096], *peer=0, *tag[32];
|
|
int i;
|
|
|
|
- if ( getpeername(0, (struct sockaddr*)&peername, &peerlen) == -1 )
|
|
- csync_fatal("Can't run getpeername on fd 0: %s", strerror(errno));
|
|
+
|
|
+ if (fstat(0, &sb))
|
|
+ csync_fatal("Can't run fstat on fd 0: %s", strerror(errno));
|
|
+
|
|
+ switch (sb.st_mode & S_IFMT) {
|
|
+ case S_IFSOCK:
|
|
+ if ( getpeername(0, &peername.sa, &peerlen) == -1 )
|
|
+ csync_fatal("Can't run getpeername on fd 0: %s", strerror(errno));
|
|
+ break;
|
|
+ case S_IFIFO:
|
|
+ set_peername_from_env(&peername, "SSH_CLIENT");
|
|
+ break;
|
|
+ /* fall through */
|
|
+ default:
|
|
+ csync_fatal("I'm only talking to sockets or pipes! %x\n", sb.st_mode & S_IFMT);
|
|
+ break;
|
|
+ }
|
|
|
|
while ( conn_gets(line, 4096) ) {
|
|
int cmdnr;
|
|
@@ -246,13 +479,8 @@ void csync_daemon_session()
|
|
cmd_error = 0;
|
|
|
|
if ( cmdtab[cmdnr].need_ident && !peer ) {
|
|
- union {
|
|
- in_addr_t addr;
|
|
- unsigned char oct[4];
|
|
- } tmp;
|
|
- tmp.addr = peername.sin_addr.s_addr;
|
|
- conn_printf("Dear %d.%d.%d.%d, please identify first.\n",
|
|
- tmp.oct[0], tmp.oct[1], tmp.oct[2], tmp.oct[3]);
|
|
+ conn_printf("Dear %s, please identify first.\n",
|
|
+ csync_inet_ntop(&peername) ?: "stranger");
|
|
goto next_cmd;
|
|
}
|
|
|
|
@@ -443,8 +671,8 @@ void csync_daemon_session()
|
|
strcmp(tag[2], "-") ? url_encode(tag[2]) : "",
|
|
strcmp(tag[2], "-") ? "'" : "")
|
|
{
|
|
- if ( csync_match_file_host(url_decode(SQL_V[1]), tag[1], peer, (const char **)&tag[3]) )
|
|
- conn_printf("%s\t%s\n", SQL_V[0], SQL_V[1]);
|
|
+ if ( csync_match_file_host(url_decode(SQL_V(1)), tag[1], peer, (const char **)&tag[3]) )
|
|
+ conn_printf("%s\t%s\n", SQL_V(0), SQL_V(1));
|
|
} SQL_END;
|
|
break;
|
|
|
|
@@ -454,18 +682,18 @@ void csync_daemon_session()
|
|
csync_debug_level = atoi(tag[1]);
|
|
break;
|
|
case A_HELLO:
|
|
- if (peer) free(peer);
|
|
- hp = gethostbyname(tag[1]);
|
|
- if ( hp != 0 && peername.sin_family == hp->h_addrtype &&
|
|
- !memcmp(hp->h_addr, &peername.sin_addr, hp->h_length) &&
|
|
- conn_check_peer_cert(tag[1], 0)) {
|
|
+ if (peer) {
|
|
+ free(peer);
|
|
+ peer = NULL;
|
|
+ }
|
|
+ if (verify_peername(tag[1], &peername)) {
|
|
peer = strdup(tag[1]);
|
|
} else {
|
|
- peer = 0;
|
|
+ peer = NULL;
|
|
cmd_error = "Identification failed!";
|
|
break;
|
|
}
|
|
-#ifdef HAVE_LIBGNUTLS_OPENSSL
|
|
+#ifdef HAVE_LIBGNUTLS
|
|
if (!csync_conn_usessl) {
|
|
struct csync_nossl *t;
|
|
for (t = csync_nossl; t; t=t->next) {
|
|
@@ -507,7 +735,7 @@ found_asactive: ;
|
|
break;
|
|
case A_BYE:
|
|
for (i=0; i<32; i++)
|
|
- tag[i] = strdup(url_decode(tag[i]));
|
|
+ free(tag[i]);
|
|
conn_printf("OK (cu_later).\n");
|
|
return;
|
|
}
|
|
@@ -529,7 +757,6 @@ abort_cmd:
|
|
|
|
next_cmd:
|
|
for (i=0; i<32; i++)
|
|
- tag[i] = strdup(url_decode(tag[i]));
|
|
+ free(tag[i]);
|
|
}
|
|
}
|
|
-
|
|
diff --git a/db.c b/db.c
|
|
index 1cd6953..68848b3 100644
|
|
--- a/db.c
|
|
+++ b/db.c
|
|
@@ -19,13 +19,13 @@
|
|
*/
|
|
|
|
#include "csync2.h"
|
|
-#include <sqlite.h>
|
|
#include <stdio.h>
|
|
#include <stdarg.h>
|
|
#include <stdlib.h>
|
|
#include <unistd.h>
|
|
#include <signal.h>
|
|
#include <time.h>
|
|
+#include "db_api.h"
|
|
|
|
#define DEADLOCK_MESSAGE \
|
|
"Database backend is exceedingly busy => Terminating (requesting retry).\n"
|
|
@@ -33,14 +33,16 @@
|
|
int db_blocking_mode = 1;
|
|
int db_sync_mode = 1;
|
|
|
|
-static sqlite *db = 0;
|
|
+extern int db_type;
|
|
+static db_conn_p db = 0;
|
|
+// TODO make configurable
|
|
+int wait = 1;
|
|
|
|
static int get_dblock_timeout()
|
|
{
|
|
- return getpid() % 7 + 12;
|
|
+ return getpid() % 7 + csync_lock_timeout;
|
|
}
|
|
|
|
-
|
|
static int tqueries_counter = -50;
|
|
static time_t transaction_begin = 0;
|
|
static time_t last_wait_cycle = 0;
|
|
@@ -58,7 +60,7 @@ void csync_db_alarmhandler(int signum)
|
|
begin_commit_recursion++;
|
|
|
|
csync_debug(2, "Database idle in transaction. Forcing COMMIT.\n");
|
|
- SQL("COMMIT TRANSACTION", "COMMIT TRANSACTION");
|
|
+ SQL("COMMIT ", "COMMIT ");
|
|
tqueries_counter = -10;
|
|
|
|
begin_commit_recursion--;
|
|
@@ -82,7 +84,7 @@ void csync_db_maybegin()
|
|
transaction_begin = time(0);
|
|
if (!last_wait_cycle)
|
|
last_wait_cycle = transaction_begin;
|
|
- SQL("BEGIN TRANSACTION", "BEGIN TRANSACTION");
|
|
+ SQL("BEGIN ", "BEGIN ");
|
|
}
|
|
|
|
begin_commit_recursion--;
|
|
@@ -103,9 +105,11 @@ void csync_db_maycommit()
|
|
now = time(0);
|
|
|
|
if ((now - last_wait_cycle) > 10) {
|
|
- SQL("COMMIT TRANSACTION", "COMMIT TRANSACTION");
|
|
- csync_debug(2, "Waiting 2 secs so others can lock the database (%d - %d)...\n", (int)now, (int)last_wait_cycle);
|
|
- sleep(2);
|
|
+ SQL("COMMIT", "COMMIT ");
|
|
+ if (wait) {
|
|
+ csync_debug(2, "Waiting %d secs so others can lock the database (%d - %d)...\n", wait, (int)now, (int)last_wait_cycle);
|
|
+ sleep(wait);
|
|
+ }
|
|
last_wait_cycle = 0;
|
|
tqueries_counter = -10;
|
|
begin_commit_recursion--;
|
|
@@ -113,7 +117,7 @@ void csync_db_maycommit()
|
|
}
|
|
|
|
if ((tqueries_counter > 1000) || ((now - transaction_begin) > 3)) {
|
|
- SQL("COMMIT TRANSACTION", "COMMIT TRANSACTION");
|
|
+ SQL("COMMIT ", "COMMIT ");
|
|
tqueries_counter = 0;
|
|
begin_commit_recursion--;
|
|
return;
|
|
@@ -128,45 +132,23 @@ void csync_db_maycommit()
|
|
|
|
void csync_db_open(const char *file)
|
|
{
|
|
- db = sqlite_open(file, 0, 0);
|
|
- if ( db == 0 )
|
|
+ int rc = db_open(file, db_type, &db);
|
|
+ if ( rc != DB_OK )
|
|
csync_fatal("Can't open database: %s\n", file);
|
|
|
|
+ db_set_logger(db, csync_debug);
|
|
+
|
|
/* ignore errors on table creation */
|
|
in_sql_query++;
|
|
- sqlite_exec(db,
|
|
- "CREATE TABLE file ("
|
|
- " filename, checktxt,"
|
|
- " UNIQUE ( filename ) ON CONFLICT REPLACE"
|
|
- ")",
|
|
- 0, 0, 0);
|
|
- sqlite_exec(db,
|
|
- "CREATE TABLE dirty ("
|
|
- " filename, force, myname, peername,"
|
|
- " UNIQUE ( filename, peername ) ON CONFLICT IGNORE"
|
|
- ")",
|
|
- 0, 0, 0);
|
|
- sqlite_exec(db,
|
|
- "CREATE TABLE hint ("
|
|
- " filename, recursive,"
|
|
- " UNIQUE ( filename, recursive ) ON CONFLICT IGNORE"
|
|
- ")",
|
|
- 0, 0, 0);
|
|
- sqlite_exec(db,
|
|
- "CREATE TABLE action ("
|
|
- " filename, command, logfile,"
|
|
- " UNIQUE ( filename, command ) ON CONFLICT IGNORE"
|
|
- ")",
|
|
- 0, 0, 0);
|
|
- sqlite_exec(db,
|
|
- "CREATE TABLE x509_cert ("
|
|
- " peername, certdata,"
|
|
- " UNIQUE ( peername ) ON CONFLICT IGNORE"
|
|
- ")",
|
|
- 0, 0, 0);
|
|
+
|
|
+ if (db_schema_version(db) < DB_SCHEMA_VERSION)
|
|
+ if (db_upgrade_to_schema(db, DB_SCHEMA_VERSION) != DB_OK)
|
|
+ csync_fatal("Cannot create database tables (version requested = %d): %s\n", DB_SCHEMA_VERSION, db_errmsg(db));
|
|
+
|
|
if (!db_sync_mode)
|
|
- sqlite_exec(db, "PRAGMA synchronous = OFF", 0, 0, 0);
|
|
+ db_exec(db, "PRAGMA synchronous = OFF");
|
|
in_sql_query--;
|
|
+ // return db;
|
|
}
|
|
|
|
void csync_db_close()
|
|
@@ -175,10 +157,10 @@ void csync_db_close()
|
|
|
|
begin_commit_recursion++;
|
|
if (tqueries_counter > 0) {
|
|
- SQL("COMMIT TRANSACTION", "COMMIT TRANSACTION");
|
|
+ SQL("COMMIT ", "COMMIT ");
|
|
tqueries_counter = -10;
|
|
}
|
|
- sqlite_close(db);
|
|
+ db_close(db);
|
|
begin_commit_recursion--;
|
|
db = 0;
|
|
}
|
|
@@ -190,7 +172,7 @@ void csync_db_sql(const char *err, const char *fmt, ...)
|
|
int rc, busyc = 0;
|
|
|
|
va_start(ap, fmt);
|
|
- vasprintf(&sql, fmt, ap);
|
|
+ VASPRINTF(&sql, fmt, ap);
|
|
va_end(ap);
|
|
|
|
in_sql_query++;
|
|
@@ -199,15 +181,15 @@ void csync_db_sql(const char *err, const char *fmt, ...)
|
|
csync_debug(2, "SQL: %s\n", sql);
|
|
|
|
while (1) {
|
|
- rc = sqlite_exec(db, sql, 0, 0, 0);
|
|
- if ( rc != SQLITE_BUSY ) break;
|
|
- if (busyc++ > get_dblock_timeout()) { db = 0; csync_fatal(DEADLOCK_MESSAGE); }
|
|
- csync_debug(2, "Database is busy, sleeping a sec.\n");
|
|
- sleep(1);
|
|
+ rc = db_exec(db, sql);
|
|
+ if ( rc != DB_BUSY ) break;
|
|
+ if (busyc++ > get_dblock_timeout()) { db = 0; csync_fatal(DEADLOCK_MESSAGE); }
|
|
+ csync_debug(2, "Database is busy, sleeping a sec.\n");
|
|
+ sleep(1);
|
|
}
|
|
|
|
- if ( rc != SQLITE_OK && err )
|
|
- csync_fatal("Database Error: %s [%d]: %s\n", err, rc, sql);
|
|
+ if ( rc != DB_OK && err )
|
|
+ csync_fatal("Database Error: %s [%d]: %s on executing %s\n", err, rc, db_errmsg(db), sql);
|
|
free(sql);
|
|
|
|
csync_db_maycommit();
|
|
@@ -216,77 +198,140 @@ void csync_db_sql(const char *err, const char *fmt, ...)
|
|
|
|
void* csync_db_begin(const char *err, const char *fmt, ...)
|
|
{
|
|
- sqlite_vm *vm;
|
|
+ db_stmt_p stmt = NULL;
|
|
char *sql;
|
|
va_list ap;
|
|
int rc, busyc = 0;
|
|
-
|
|
+ char *ppTail;
|
|
va_start(ap, fmt);
|
|
- vasprintf(&sql, fmt, ap);
|
|
+ VASPRINTF(&sql, fmt, ap);
|
|
va_end(ap);
|
|
|
|
in_sql_query++;
|
|
csync_db_maybegin();
|
|
|
|
csync_debug(2, "SQL: %s\n", sql);
|
|
-
|
|
while (1) {
|
|
- rc = sqlite_compile(db, sql, 0, &vm, 0);
|
|
- if ( rc != SQLITE_BUSY ) break;
|
|
+ rc = db_prepare_stmt(db, sql, &stmt, &ppTail);
|
|
+ if ( rc != DB_BUSY ) break;
|
|
if (busyc++ > get_dblock_timeout()) { db = 0; csync_fatal(DEADLOCK_MESSAGE); }
|
|
csync_debug(2, "Database is busy, sleeping a sec.\n");
|
|
sleep(1);
|
|
}
|
|
|
|
- if ( rc != SQLITE_OK && err )
|
|
- csync_fatal("Database Error: %s [%d]: %s\n", err, rc, sql);
|
|
+ if ( rc != DB_OK && err )
|
|
+ csync_fatal("Database Error: %s [%d]: %s on executing %s\n", err, rc, db_errmsg(db), sql);
|
|
free(sql);
|
|
|
|
- return vm;
|
|
+ return stmt;
|
|
+}
|
|
+
|
|
+const char *csync_db_get_column_text(void *stmt, int column) {
|
|
+ return db_stmt_get_column_text(stmt, column);
|
|
+}
|
|
+
|
|
+int csync_db_get_column_int(void *stmt, int column) {
|
|
+ return db_stmt_get_column_int((db_stmt_p) stmt, column);
|
|
}
|
|
|
|
int csync_db_next(void *vmx, const char *err,
|
|
int *pN, const char ***pazValue, const char ***pazColName)
|
|
{
|
|
- sqlite_vm *vm = vmx;
|
|
+ db_stmt_p stmt = vmx;
|
|
int rc, busyc = 0;
|
|
|
|
csync_debug(4, "Trying to fetch a row from the database.\n");
|
|
|
|
while (1) {
|
|
- rc = sqlite_step(vm, pN, pazValue, pazColName);
|
|
- if ( rc != SQLITE_BUSY ) break;
|
|
- if (busyc++ > get_dblock_timeout()) { db = 0; csync_fatal(DEADLOCK_MESSAGE); }
|
|
+ rc = db_stmt_next(stmt);
|
|
+ if ( rc != DB_BUSY )
|
|
+ break;
|
|
+ if (busyc++ > get_dblock_timeout()) {
|
|
+ db = 0;
|
|
+ csync_fatal(DEADLOCK_MESSAGE);
|
|
+ }
|
|
csync_debug(2, "Database is busy, sleeping a sec.\n");
|
|
sleep(1);
|
|
}
|
|
|
|
- if ( rc != SQLITE_OK && rc != SQLITE_ROW &&
|
|
- rc != SQLITE_DONE && err )
|
|
- csync_fatal("Database Error: %s [%d].\n", err, rc);
|
|
+ if ( rc != DB_OK && rc != DB_ROW &&
|
|
+ rc != DB_DONE && err )
|
|
+ csync_fatal("Database Error: %s [%d]: %s\n", err, rc, db_errmsg(db));
|
|
+
|
|
+ return rc == DB_ROW;
|
|
+}
|
|
|
|
- return rc == SQLITE_ROW;
|
|
+const void * csync_db_colblob(void *stmtx, int col) {
|
|
+ db_stmt_p stmt = stmtx;
|
|
+ const void *ptr = stmt->get_column_blob(stmt, col);
|
|
+ if (stmt->db && stmt->db->logger) {
|
|
+ stmt->db->logger(4, "DB get blob: %s ", (char *) ptr);
|
|
+ }
|
|
+ return ptr;
|
|
}
|
|
|
|
void csync_db_fin(void *vmx, const char *err)
|
|
{
|
|
- sqlite_vm *vm = vmx;
|
|
+ db_stmt_p stmt = (db_stmt_p) vmx;
|
|
int rc, busyc = 0;
|
|
|
|
+ if (vmx == NULL)
|
|
+ return;
|
|
+
|
|
csync_debug(2, "SQL Query finished.\n");
|
|
|
|
while (1) {
|
|
- rc = sqlite_finalize(vm, 0);
|
|
- if ( rc != SQLITE_BUSY ) break;
|
|
- if (busyc++ > get_dblock_timeout()) { db = 0; csync_fatal(DEADLOCK_MESSAGE); }
|
|
- csync_debug(2, "Database is busy, sleeping a sec.\n");
|
|
- sleep(1);
|
|
+ rc = db_stmt_close(stmt);
|
|
+ if ( rc != DB_BUSY )
|
|
+ break;
|
|
+ if (busyc++ > get_dblock_timeout()) { db = 0; csync_fatal(DEADLOCK_MESSAGE); }
|
|
+ csync_debug(2, "Database is busy, sleeping a sec.\n");
|
|
+ sleep(1);
|
|
}
|
|
|
|
- if ( rc != SQLITE_OK && err )
|
|
- csync_fatal("Database Error: %s [%d].\n", err, rc);
|
|
+ if ( rc != DB_OK && err )
|
|
+ csync_fatal("Database Error: %s [%d]: %s\n", err, rc, db_errmsg(db));
|
|
|
|
csync_db_maycommit();
|
|
in_sql_query--;
|
|
}
|
|
|
|
+#if defined(HAVE_SQLITE)
|
|
+#define DBEXTENSION ".db"
|
|
+#endif
|
|
+#if defined(HAVE_SQLITE3)
|
|
+#define DBEXTENSION ".db3"
|
|
+#endif
|
|
+
|
|
+char *db_default_database(char *dbdir, char *myhostname, char *cfg_name)
|
|
+{
|
|
+ char *db;
|
|
+
|
|
+#if defined(HAVE_SQLITE3)
|
|
+ if (cfg_name[0] != '\0')
|
|
+ ASPRINTF(&db, "sqlite3://%s/%s_%s" DBEXTENSION, dbdir, myhostname, cfgname);
|
|
+ else
|
|
+ ASPRINTF(&db, "sqlite3://%s/%s" DBEXTENSION, dbdir, myhostname);
|
|
+#elif defined(HAVE_SQLITE)
|
|
+ if (cfg_name[0] != '\0')
|
|
+ ASPRINTF(&db, "sqlite2://%s/%s_%s" DBEXTENSION, dbdir, myhostname, cfgname);
|
|
+ else
|
|
+ ASPRINTF(&db, "sqlite2://%s/%s" DBEXTENSION, dbdir, myhostname);
|
|
+#elif defined(HAVE_MYSQL)
|
|
+ if (cfg_name[0] != '\0')
|
|
+ ASPRINTF(&db, "mysql://root@localhost/csync2_%s_%s" DBEXTENSION, myhostname, cfgname);
|
|
+ else
|
|
+ ASPRINTF(&db, "mysql://root@localhost/csync2_%s" DBEXTENSION, myhostname);
|
|
+
|
|
+#elif defined(HAVE_POSTGRES)
|
|
+ if (cfg_name[0] != '\0')
|
|
+ ASPRINTF(&db, "pgsql://root@localhost/csync2_%s_%s" DBEXTENSION, myhostname, cfgname);
|
|
+ else
|
|
+ ASPRINTF(&db, "pgsql://root@localhost/csync2_%s" DBEXTENSION, myhostname);
|
|
+
|
|
+#else
|
|
+#error "No database backend available. Please install either libpq, libmysqlclient or libsqlite, then reconfigure and recompile"
|
|
+#endif
|
|
+
|
|
+ return db;
|
|
+}
|
|
diff --git a/db_api.c b/db_api.c
|
|
new file mode 100644
|
|
index 0000000..af5591c
|
|
--- /dev/null
|
|
+++ b/db_api.c
|
|
@@ -0,0 +1,186 @@
|
|
+/*
|
|
+ DB API
|
|
+
|
|
+ */
|
|
+
|
|
+#include "csync2.h"
|
|
+#include <stdio.h>
|
|
+#include <stdarg.h>
|
|
+#include <stdlib.h>
|
|
+#include <unistd.h>
|
|
+#include <signal.h>
|
|
+#include <time.h>
|
|
+#include "db_api.h"
|
|
+
|
|
+#include "db_mysql.h"
|
|
+#include "db_postgres.h"
|
|
+#include "db_sqlite.h"
|
|
+#include "db_sqlite2.h"
|
|
+
|
|
+#define DEADLOCK_MESSAGE \
|
|
+ "Database backend is exceedingly busy => Terminating (requesting retry).\n"
|
|
+
|
|
+int db_sqlite_open(const char *file, db_conn_p *db);
|
|
+int db_mysql_open(const char *file, db_conn_p *db);
|
|
+
|
|
+int db_detect_type(const char **db_str, int type) {
|
|
+ const char *db_types[] = { "mysql://", "sqlite3://", "sqlite2://", "pgsql://", 0 };
|
|
+ int types[] = { DB_MYSQL, DB_SQLITE3, DB_SQLITE2, DB_PGSQL };
|
|
+ int index;
|
|
+ for (index = 0; 1 ; index++) {
|
|
+ if (db_types[index] == 0)
|
|
+ break;
|
|
+ if (!strncmp(*db_str, db_types[index], strlen(db_types[index]))) {
|
|
+ *db_str += strlen(db_types[index]);
|
|
+ return types[index];
|
|
+ }
|
|
+ }
|
|
+ return type;
|
|
+}
|
|
+
|
|
+int db_open(const char *file, int type, db_conn_p *db)
|
|
+{
|
|
+ int rc = DB_ERROR;
|
|
+ const char *db_str;
|
|
+ db_str = file;
|
|
+
|
|
+ type = db_detect_type(&db_str, type);
|
|
+ /* Switch between implementation */
|
|
+ switch (type) {
|
|
+ case DB_SQLITE2:
|
|
+ rc = db_sqlite2_open(db_str, db);
|
|
+
|
|
+ if (rc != DB_OK && db_str[0] != '/')
|
|
+ fprintf(csync_debug_out, "Cannot open database file: %s, maybe you need three slashes (like sqlite:///var/lib/csync2/csync2.db)\n", db_str);
|
|
+ break;
|
|
+ case DB_SQLITE3:
|
|
+ rc = db_sqlite_open(db_str, db);
|
|
+
|
|
+ if (rc != DB_OK && db_str[0] != '/')
|
|
+ fprintf(csync_debug_out, "Cannot open database file: %s, maybe you need three slashes (like sqlite:///var/lib/csync2/csync2.db)\n", db_str);
|
|
+ break;
|
|
+#ifdef HAVE_MYSQL
|
|
+ case DB_MYSQL:
|
|
+ rc = db_mysql_open(db_str, db);
|
|
+ break;
|
|
+#else
|
|
+ case DB_MYSQL:
|
|
+ csync_fatal("No Mysql support configured. Please reconfigure with --enable-mysql (database is %s).\n", file);
|
|
+ rc = DB_ERROR;
|
|
+ break;
|
|
+#endif
|
|
+#ifdef HAVE_POSTGRES
|
|
+ case DB_PGSQL:
|
|
+ rc = db_postgres_open(db_str, db);
|
|
+ break;
|
|
+#else
|
|
+ case DB_PGSQL:
|
|
+ csync_fatal("No Postgres SQL support configured. Please reconfigure with --enable-postgres (database is %s).\n", file);
|
|
+ rc = DB_ERROR;
|
|
+ break;
|
|
+#endif
|
|
+
|
|
+ default:
|
|
+ csync_fatal("Database type not found. Can't open database %s\n", file);
|
|
+ rc = DB_ERROR;
|
|
+ }
|
|
+ if (*db)
|
|
+ (*db)->logger = 0;
|
|
+ return rc;
|
|
+}
|
|
+
|
|
+void db_set_logger(db_conn_p conn, void (*logger)(int lv, const char *fmt, ...)) {
|
|
+ if (conn == NULL)
|
|
+ csync_fatal("No connection in set_logger.\n");
|
|
+
|
|
+ conn->logger = logger;
|
|
+}
|
|
+
|
|
+void db_close(db_conn_p conn)
|
|
+{
|
|
+ if (!conn || !conn->close)
|
|
+ return;
|
|
+ conn->close(conn);
|
|
+}
|
|
+
|
|
+const char *db_errmsg(db_conn_p conn)
|
|
+{
|
|
+ if (conn && conn->errmsg)
|
|
+ return conn->errmsg(conn);
|
|
+
|
|
+ return "(no error message function available)";
|
|
+}
|
|
+
|
|
+int db_exec(db_conn_p conn, const char *sql) {
|
|
+ if (conn && conn->exec)
|
|
+ return conn->exec(conn, sql);
|
|
+
|
|
+ csync_debug(0, "No exec function in db_exec.\n");
|
|
+ return DB_ERROR;
|
|
+}
|
|
+
|
|
+int db_prepare_stmt(db_conn_p conn, const char *sql, db_stmt_p *stmt, char **pptail) {
|
|
+ if (conn && conn->prepare)
|
|
+ return conn->prepare(conn, sql, stmt, pptail);
|
|
+
|
|
+ csync_debug(0, "No prepare function in db_prepare_stmt.\n");
|
|
+ return DB_ERROR;
|
|
+}
|
|
+
|
|
+const char *db_stmt_get_column_text(db_stmt_p stmt, int column) {
|
|
+ if (stmt && stmt->get_column_text)
|
|
+ return stmt->get_column_text(stmt, column);
|
|
+
|
|
+ csync_debug(0, "No stmt in db_stmt_get_column_text / no function.\n");
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+int db_stmt_get_column_int(db_stmt_p stmt, int column) {
|
|
+ if (stmt && stmt->get_column_int)
|
|
+ return stmt->get_column_int(stmt, column);
|
|
+
|
|
+ csync_debug(0, "No stmt in db_stmt_get_column_int / no function.\n");
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int db_stmt_next(db_stmt_p stmt)
|
|
+{
|
|
+ if (stmt && stmt->next)
|
|
+ return stmt->next(stmt);
|
|
+
|
|
+ csync_debug(0, "No stmt in db_stmt_next / no function.\n");
|
|
+ return DB_ERROR;
|
|
+}
|
|
+
|
|
+int db_stmt_close(db_stmt_p stmt)
|
|
+{
|
|
+ if (stmt && stmt->close)
|
|
+ return stmt->close(stmt);
|
|
+
|
|
+ csync_debug(0, "No stmt in db_stmt_close / no function.\n");
|
|
+ return DB_ERROR;
|
|
+}
|
|
+
|
|
+int db_schema_version(db_conn_p db)
|
|
+{
|
|
+ int version = -1;
|
|
+
|
|
+ SQL_BEGIN(NULL, /* ignore errors */
|
|
+ "SELECT count(*) from file")
|
|
+ {
|
|
+ version = 0;
|
|
+ } SQL_END;
|
|
+
|
|
+ return version;
|
|
+}
|
|
+
|
|
+
|
|
+int db_upgrade_to_schema(db_conn_p db, int version)
|
|
+{
|
|
+ if (db && db->upgrade_to_schema)
|
|
+ return db->upgrade_to_schema(version);
|
|
+
|
|
+ return DB_ERROR;
|
|
+}
|
|
+
|
|
+
|
|
diff --git a/db_api.h b/db_api.h
|
|
new file mode 100644
|
|
index 0000000..eab627b
|
|
--- /dev/null
|
|
+++ b/db_api.h
|
|
@@ -0,0 +1,62 @@
|
|
+
|
|
+#ifndef DB_API_H
|
|
+#define DB_API_H
|
|
+
|
|
+#define DB_SQLITE2 1
|
|
+#define DB_SQLITE3 2
|
|
+#define DB_MYSQL 3
|
|
+#define DB_PGSQL 4
|
|
+
|
|
+#define DB_OK 0
|
|
+#define DB_ERROR 1
|
|
+#define DB_BUSY 2
|
|
+#define DB_NO_CONNECTION 3
|
|
+#define DB_NO_CONNECTION_REAL 4
|
|
+#define DB_ROW 100
|
|
+#define DB_DONE 101
|
|
+
|
|
+typedef struct db_conn_t *db_conn_p;
|
|
+typedef struct db_stmt_t *db_stmt_p;
|
|
+
|
|
+struct db_conn_t {
|
|
+ void *private;
|
|
+ int (*exec) (db_conn_p conn, const char* exec);
|
|
+ int (*prepare)(db_conn_p conn, const char *statement, db_stmt_p *stmt, char **value);
|
|
+ void (*close) (db_conn_p conn);
|
|
+ void (*logger) (int lv, const char *fmt, ...);
|
|
+ const char* (*errmsg) (db_conn_p conn);
|
|
+ int (*upgrade_to_schema) (int version);
|
|
+};
|
|
+
|
|
+struct db_stmt_t {
|
|
+ void *private;
|
|
+ void *private2;
|
|
+ db_conn_p db;
|
|
+ const char * (*get_column_text) (db_stmt_p vmx, int column);
|
|
+ const void* (*get_column_blob) (db_stmt_p vmx, int column);
|
|
+ int (*get_column_int) (db_stmt_p vmx, int column);
|
|
+ int (*next) (db_stmt_p stmt);
|
|
+ int (*close)(db_stmt_p stmt);
|
|
+};
|
|
+
|
|
+//struct db_conn *db_conn;
|
|
+
|
|
+int db_open(const char *file, int type, db_conn_p *db);
|
|
+void db_close(db_conn_p conn);
|
|
+
|
|
+int db_exec(db_conn_p conn, const char* exec);
|
|
+int db_exec2(db_conn_p conn, const char* exec, void (*callback)(void *, int, int), void *data, const char **err);
|
|
+
|
|
+int db_prepare_stmt(db_conn_p conn, const char *statement, db_stmt_p *stmt, char **value);
|
|
+
|
|
+const char * db_stmt_get_column_text(db_stmt_p stmt, int column);
|
|
+int db_stmt_get_column_int(db_stmt_p stmt, int column);
|
|
+int db_stmt_next (db_stmt_p stmt);
|
|
+int db_stmt_close(db_stmt_p stmt);
|
|
+
|
|
+void db_set_logger(db_conn_p conn, void (*logger)(int lv, const char *fmt, ...));
|
|
+int db_schema_version(db_conn_p db);
|
|
+int db_upgrade_to_schema(db_conn_p db, int version);
|
|
+const char *db_errmsg(db_conn_p conn);
|
|
+
|
|
+#endif
|
|
diff --git a/db_mysql.c b/db_mysql.c
|
|
new file mode 100644
|
|
index 0000000..1b6d09e
|
|
--- /dev/null
|
|
+++ b/db_mysql.c
|
|
@@ -0,0 +1,408 @@
|
|
+/*
|
|
+ * Copyright (C) 2010 Dennis Schafroth <dennis@schafroth.com>
|
|
+ * Copyright (C) 2010 Johannes Thoma <johannes.thoma@gmx.at>
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify
|
|
+ * it under the terms of the GNU General Public License as published by
|
|
+ * the Free Software Foundation; either version 2 of the License, or
|
|
+ * (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
+ */
|
|
+
|
|
+#include "csync2.h"
|
|
+#include <stdio.h>
|
|
+#include <stdarg.h>
|
|
+#include <stdlib.h>
|
|
+#include <unistd.h>
|
|
+#include <signal.h>
|
|
+#include <time.h>
|
|
+#include <string.h>
|
|
+#include "db_api.h"
|
|
+#include "db_mysql.h"
|
|
+#include "dl.h"
|
|
+
|
|
+#ifdef HAVE_MYSQL
|
|
+#include <mysql/mysql.h>
|
|
+#include <mysql/mysqld_error.h>
|
|
+
|
|
+static struct db_mysql_fns {
|
|
+ MYSQL *(*mysql_init_fn)(MYSQL*);
|
|
+ MYSQL *(*mysql_real_connect_fn)(MYSQL *, const char *, const char *, const char *, const char *, unsigned int, const char *, unsigned long);
|
|
+ int (*mysql_errno_fn)(MYSQL*);
|
|
+ int (*mysql_query_fn)(MYSQL*, const char*);
|
|
+ void (*mysql_close_fn)(MYSQL*);
|
|
+ const char *(*mysql_error_fn)(MYSQL *);
|
|
+ MYSQL_RES *(*mysql_store_result_fn)(MYSQL *);
|
|
+ unsigned int (*mysql_num_fields_fn)(MYSQL_RES *);
|
|
+ MYSQL_ROW (*mysql_fetch_row_fn)(MYSQL_RES *);
|
|
+ void (*mysql_free_result_fn)(MYSQL_RES *);
|
|
+ unsigned int (*mysql_warning_count_fn)(MYSQL *);
|
|
+} f;
|
|
+
|
|
+static void *dl_handle;
|
|
+
|
|
+
|
|
+static void db_mysql_dlopen(void)
|
|
+{
|
|
+ csync_debug(1, "Opening shared library libmysqlclient.so\n");
|
|
+ dl_handle = dlopen("libmysqlclient.so", RTLD_LAZY);
|
|
+ if (dl_handle == NULL) {
|
|
+		csync_fatal("Could not open libmysqlclient.so: %s\nPlease install the MySQL client library (libmysqlclient) or use another database backend (sqlite, postgres)\n", dlerror());
|
|
+ }
|
|
+
|
|
+ csync_debug(1, "Reading symbols from shared library libmysqlclient.so\n");
|
|
+
|
|
+ LOOKUP_SYMBOL(dl_handle, mysql_init);
|
|
+ LOOKUP_SYMBOL(dl_handle, mysql_real_connect);
|
|
+ LOOKUP_SYMBOL(dl_handle, mysql_errno);
|
|
+ LOOKUP_SYMBOL(dl_handle, mysql_query);
|
|
+ LOOKUP_SYMBOL(dl_handle, mysql_close);
|
|
+ LOOKUP_SYMBOL(dl_handle, mysql_error);
|
|
+ LOOKUP_SYMBOL(dl_handle, mysql_store_result);
|
|
+ LOOKUP_SYMBOL(dl_handle, mysql_num_fields);
|
|
+ LOOKUP_SYMBOL(dl_handle, mysql_fetch_row);
|
|
+ LOOKUP_SYMBOL(dl_handle, mysql_free_result);
|
|
+ LOOKUP_SYMBOL(dl_handle, mysql_warning_count);
|
|
+}
|
|
+
|
|
+
|
|
+int db_mysql_parse_url(char *url, char **host, char **user, char **pass, char **database, unsigned int *port, char **unix_socket)
|
|
+{
|
|
+ char *pos = strchr(url, '@');
|
|
+ if (pos) {
|
|
+ // Optional user/passwd
|
|
+ *(pos) = 0;
|
|
+ *(user) = url;
|
|
+ url = pos + 1;
|
|
+ // TODO password
|
|
+ pos = strchr(*user, ':');
|
|
+ if (pos) {
|
|
+ *(pos) = 0;
|
|
+ *(pass) = (pos +1);
|
|
+ }
|
|
+ else
|
|
+ *pass = 0;
|
|
+ }
|
|
+ else {
|
|
+		// No user/password given
|
|
+ *user = 0;
|
|
+ *pass = 0;
|
|
+ }
|
|
+ *host = url;
|
|
+ pos = strchr(*host, '/');
|
|
+ if (pos) {
|
|
+ // Database
|
|
+ (*pos) = 0;
|
|
+ *database = pos+1;
|
|
+ }
|
|
+ else {
|
|
+ *database = 0;
|
|
+ }
|
|
+ pos = strchr(*host, ':');
|
|
+ if (pos) {
|
|
+ (*pos) = 0;
|
|
+ *port = atoi(pos+1);
|
|
+ }
|
|
+ *unix_socket = 0;
|
|
+ return DB_OK;
|
|
+}
|
|
+
|
|
+#endif
|
|
+
|
|
+int db_mysql_open(const char *file, db_conn_p *conn_p)
|
|
+{
|
|
+#ifdef HAVE_MYSQL
|
|
+ db_mysql_dlopen();
|
|
+
|
|
+ MYSQL *db = f.mysql_init_fn(0);
|
|
+ char *host, *user, *pass, *database, *unix_socket;
|
|
+ unsigned int port;
|
|
+ char *db_url = malloc(strlen(file)+1);
|
|
+ char *create_database_statement;
|
|
+
|
|
+ if (db_url == NULL)
|
|
+ csync_fatal("No memory for db_url\n");
|
|
+
|
|
+ strcpy(db_url, file);
|
|
+ int rc = db_mysql_parse_url(db_url, &host, &user, &pass, &database, &port, &unix_socket);
|
|
+ if (rc != DB_OK) {
|
|
+ return rc;
|
|
+ }
|
|
+
|
|
+ if (f.mysql_real_connect_fn(db, host, user, pass, database, port, unix_socket, 0) == NULL) {
|
|
+ if (f.mysql_errno_fn(db) == ER_BAD_DB_ERROR) {
|
|
+ if (f.mysql_real_connect_fn(db, host, user, pass, NULL, port, unix_socket, 0) != NULL) {
|
|
+ ASPRINTF(&create_database_statement, "create database %s", database);
|
|
+
|
|
+ csync_debug(2, "creating database %s\n", database);
|
|
+ if (f.mysql_query_fn(db, create_database_statement) != 0)
|
|
+ csync_fatal("Cannot create database %s: Error: %s\n", database, f.mysql_error_fn(db));
|
|
+ free(create_database_statement);
|
|
+
|
|
+ f.mysql_close_fn(db);
|
|
+ db = f.mysql_init_fn(0);
|
|
+
|
|
+ if (f.mysql_real_connect_fn(db, host, user, pass, database, port, unix_socket, 0) == NULL)
|
|
+ goto fatal;
|
|
+ }
|
|
+ } else
|
|
+fatal:
|
|
+ csync_fatal("Failed to connect to database: Error: %s\n", f.mysql_error_fn(db));
|
|
+ }
|
|
+
|
|
+ db_conn_p conn = calloc(1, sizeof(*conn));
|
|
+ if (conn == NULL) {
|
|
+ return DB_ERROR;
|
|
+ }
|
|
+ *conn_p = conn;
|
|
+ conn->private = db;
|
|
+ conn->close = db_mysql_close;
|
|
+ conn->exec = db_mysql_exec;
|
|
+ conn->prepare = db_mysql_prepare;
|
|
+ conn->errmsg = db_mysql_errmsg;
|
|
+ conn->upgrade_to_schema = db_mysql_upgrade_to_schema;
|
|
+
|
|
+ return rc;
|
|
+#else
|
|
+ return DB_ERROR;
|
|
+#endif
|
|
+}
|
|
+
|
|
+#ifdef HAVE_MYSQL
|
|
+
|
|
+void db_mysql_close(db_conn_p conn)
|
|
+{
|
|
+ if (!conn)
|
|
+ return;
|
|
+ if (!conn->private)
|
|
+ return;
|
|
+ f.mysql_close_fn(conn->private);
|
|
+ conn->private = 0;
|
|
+}
|
|
+
|
|
+const char *db_mysql_errmsg(db_conn_p conn)
|
|
+{
|
|
+ if (!conn)
|
|
+ return "(no connection)";
|
|
+ if (!conn->private)
|
|
+ return "(no private data in conn)";
|
|
+ return f.mysql_error_fn(conn->private);
|
|
+}
|
|
+
|
|
+static void print_warnings(int level, MYSQL *m)
|
|
+{
|
|
+ int rc;
|
|
+ MYSQL_RES *res;
|
|
+ int fields;
|
|
+ MYSQL_ROW row;
|
|
+
|
|
+ if (m == NULL)
|
|
+ csync_fatal("print_warnings: m is NULL");
|
|
+
|
|
+ rc = f.mysql_query_fn(m, "SHOW WARNINGS");
|
|
+ if (rc != 0)
|
|
+ csync_fatal("print_warnings: Failed to get warning messages");
|
|
+
|
|
+ res = f.mysql_store_result_fn(m);
|
|
+ if (res == NULL)
|
|
+ csync_fatal("print_warnings: Failed to get result set for warning messages");
|
|
+
|
|
+ fields = f.mysql_num_fields_fn(res);
|
|
+	if (fields < 3)
+		csync_fatal("print_warnings: Strange: SHOW WARNINGS result set has fewer than 3 columns");
+
|
|
+ row = f.mysql_fetch_row_fn(res);
|
|
+
|
|
+ while (row) {
|
|
+		csync_debug(level, "MySQL Warning: %s\n", row[2]);
+ row = f.mysql_fetch_row_fn(res);
|
|
+ }
|
|
+
|
|
+ f.mysql_free_result_fn(res);
|
|
+}
|
|
+
|
|
+int db_mysql_exec(db_conn_p conn, const char *sql)
|
|
+{
|
|
+ int rc = DB_ERROR;
|
|
+ if (!conn)
|
|
+ return DB_NO_CONNECTION;
|
|
+
|
|
+ if (!conn->private) {
|
|
+ /* added error element */
|
|
+ return DB_NO_CONNECTION_REAL;
|
|
+ }
|
|
+ rc = f.mysql_query_fn(conn->private, sql);
|
|
+
|
|
+/* Treat warnings as errors. For example when a column is too short this should
|
|
+ be an error. */
|
|
+
|
|
+ if (f.mysql_warning_count_fn(conn->private) > 0) {
|
|
+ print_warnings(1, conn->private);
|
|
+ return DB_ERROR;
|
|
+ }
|
|
+
|
|
+ /* On error parse, create DB ERROR element */
|
|
+ return rc;
|
|
+}
|
|
+
|
|
+int db_mysql_prepare(db_conn_p conn, const char *sql, db_stmt_p *stmt_p,
|
|
+ char **pptail) {
|
|
+ int rc = DB_ERROR;
|
|
+
|
|
+ *stmt_p = NULL;
|
|
+
|
|
+ if (!conn)
|
|
+ return DB_NO_CONNECTION;
|
|
+
|
|
+ if (!conn->private) {
|
|
+ /* added error element */
|
|
+ return DB_NO_CONNECTION_REAL;
|
|
+ }
|
|
+ db_stmt_p stmt = malloc(sizeof(*stmt));
|
|
+ /* TODO avoid strlen, use configurable limit? */
|
|
+ rc = f.mysql_query_fn(conn->private, sql);
|
|
+
|
|
+/* Treat warnings as errors. For example when a column is too short this should
|
|
+ be an error. */
|
|
+
|
|
+ if (f.mysql_warning_count_fn(conn->private) > 0) {
|
|
+ print_warnings(1, conn->private);
|
|
+ return DB_ERROR;
|
|
+ }
|
|
+
|
|
+ MYSQL_RES *mysql_stmt = f.mysql_store_result_fn(conn->private);
|
|
+ if (mysql_stmt == NULL) {
|
|
+ csync_debug(2, "Error in mysql_store_result: %s", f.mysql_error_fn(conn->private));
|
|
+ return DB_ERROR;
|
|
+ }
|
|
+
|
|
+/* Treat warnings as errors. For example when a column is too short this should
|
|
+ be an error. */
|
|
+
|
|
+ if (f.mysql_warning_count_fn(conn->private) > 0) {
|
|
+ print_warnings(1, conn->private);
|
|
+ return DB_ERROR;
|
|
+ }
|
|
+
|
|
+ stmt->private = mysql_stmt;
|
|
+ /* TODO error mapping / handling */
|
|
+ *stmt_p = stmt;
|
|
+ stmt->get_column_text = db_mysql_stmt_get_column_text;
|
|
+ stmt->get_column_blob = db_mysql_stmt_get_column_blob;
|
|
+ stmt->get_column_int = db_mysql_stmt_get_column_int;
|
|
+ stmt->next = db_mysql_stmt_next;
|
|
+ stmt->close = db_mysql_stmt_close;
|
|
+ stmt->db = conn;
|
|
+ return DB_OK;
|
|
+}
|
|
+
|
|
+const void* db_mysql_stmt_get_column_blob(db_stmt_p stmt, int column) {
|
|
+ if (!stmt || !stmt->private2) {
|
|
+ return 0;
|
|
+ }
|
|
+ MYSQL_ROW row = stmt->private2;
|
|
+ return row[column];
|
|
+}
|
|
+
|
|
+const char *db_mysql_stmt_get_column_text(db_stmt_p stmt, int column) {
|
|
+ if (!stmt || !stmt->private2) {
|
|
+ return 0;
|
|
+ }
|
|
+ MYSQL_ROW row = stmt->private2;
|
|
+ return row[column];
|
|
+}
|
|
+
|
|
+int db_mysql_stmt_get_column_int(db_stmt_p stmt, int column) {
|
|
+ const char *value = db_mysql_stmt_get_column_text(stmt, column);
|
|
+ if (value)
|
|
+ return atoi(value);
|
|
+ /* error mapping */
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+
|
|
+int db_mysql_stmt_next(db_stmt_p stmt)
|
|
+{
|
|
+ MYSQL_RES *mysql_stmt = stmt->private;
|
|
+ stmt->private2 = f.mysql_fetch_row_fn(mysql_stmt);
|
|
+ /* error mapping */
|
|
+ if (stmt->private2)
|
|
+ return DB_ROW;
|
|
+ return DB_DONE;
|
|
+}
|
|
+
|
|
+int db_mysql_stmt_close(db_stmt_p stmt)
|
|
+{
|
|
+ MYSQL_RES *mysql_stmt = stmt->private;
|
|
+ f.mysql_free_result_fn(mysql_stmt);
|
|
+ free(stmt);
|
|
+ return DB_OK;
|
|
+}
|
|
+
|
|
+
|
|
+int db_mysql_upgrade_to_schema(int version)
|
|
+{
|
|
+ if (version < 0)
|
|
+ return DB_OK;
|
|
+
|
|
+ if (version > 0)
|
|
+ return DB_ERROR;
|
|
+
|
|
+ csync_debug(2, "Upgrading database schema to version %d.\n", version);
|
|
+
|
|
+/* We want proper logging, so use the csync sql function instead
|
|
+ * of that from the database layer.
|
|
+ */
|
|
+ csync_db_sql("Creating action table",
|
|
+ "CREATE TABLE `action` ("
|
|
+ " `filename` varchar(4096) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,"
|
|
+ " `command` text,"
|
|
+ " `logfile` text,"
|
|
+ " UNIQUE KEY `filename` (`filename`(326),`command`(20))"
|
|
+ ")");
|
|
+
|
|
+ csync_db_sql("Creating dirty table",
|
|
+ "CREATE TABLE `dirty` ("
|
|
+ " `filename` varchar(4096) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,"
|
|
+ " `forced` int(11) DEFAULT NULL,"
|
|
+ " `myname` varchar(50) DEFAULT NULL,"
|
|
+ " `peername` varchar(50) DEFAULT NULL,"
|
|
+ " UNIQUE KEY `filename` (`filename`(316),`peername`),"
|
|
+ " KEY `dirty_host` (`peername`(10))"
|
|
+ ")");
|
|
+
|
|
+ csync_db_sql("Creating file table",
|
|
+ "CREATE TABLE `file` ("
|
|
+ " `filename` varchar(4096) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,"
|
|
+ " `checktxt` varchar(200) DEFAULT NULL,"
|
|
+ " UNIQUE KEY `filename` (`filename`(333))"
|
|
+ ")");
|
|
+
|
|
+ csync_db_sql("Creating hint table",
|
|
+ "CREATE TABLE `hint` ("
|
|
+ " `filename` varchar(4096) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,"
|
|
+ " `recursive` int(11) DEFAULT NULL"
|
|
+ ")");
|
|
+
|
|
+ csync_db_sql("Creating x509_cert table",
|
|
+ "CREATE TABLE `x509_cert` ("
|
|
+ " `peername` varchar(50) DEFAULT NULL,"
|
|
+ " `certdata` varchar(255) DEFAULT NULL,"
|
|
+ " UNIQUE KEY `peername` (`peername`)"
|
|
+ ")");
|
|
+
|
|
+/* csync_db_sql does a csync_fatal on error, so we always return DB_OK here. */
|
|
+
|
|
+ return DB_OK;
|
|
+}
|
|
+
|
|
+
|
|
+#endif
|
|
diff --git a/db_mysql.h b/db_mysql.h
new file mode 100644
index 0000000..c5aeab3
--- /dev/null
+++ b/db_mysql.h
@@ -0,0 +1,19 @@
+
+#ifndef DB_MYSQL_H
+#define DB_MYSQL_H
+
+/* public */
+int db_mysql_open(const char *file, db_conn_p *conn_p);
+/* Private */
+void db_mysql_close(db_conn_p db_conn);
+int db_mysql_exec(db_conn_p conn, const char *sql);
+int db_mysql_prepare(db_conn_p conn, const char *sql, db_stmt_p *stmt_p, char **pptail);
+int db_mysql_stmt_next(db_stmt_p stmt);
+const void* db_mysql_stmt_get_column_blob(db_stmt_p stmt, int column);
+const char *db_mysql_stmt_get_column_text(db_stmt_p stmt, int column);
+int db_mysql_stmt_get_column_int(db_stmt_p stmt, int column);
+int db_mysql_stmt_close(db_stmt_p stmt);
+const char *db_mysql_errmsg(db_conn_p db_conn);
+int db_mysql_upgrade_to_schema(int version);
+
+#endif
diff --git a/db_postgres.c b/db_postgres.c
|
|
new file mode 100644
|
|
index 0000000..b40bdfb
|
|
--- /dev/null
|
|
+++ b/db_postgres.c
|
|
@@ -0,0 +1,458 @@
|
|
+/*
|
|
+ * Copyright (C) 2010 Dennis Schafroth <dennis@schafroth.com>
|
|
+ * Copyright (C) 2010 Johannes Thoma <johannes.thoma@gmx.at>
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify
|
|
+ * it under the terms of the GNU General Public License as published by
|
|
+ * the Free Software Foundation; either version 2 of the License, or
|
|
+ * (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
+ */
|
|
+
|
|
+#include "csync2.h"
|
|
+#include <stdio.h>
|
|
+#include <stdarg.h>
|
|
+#include <stdlib.h>
|
|
+#include <unistd.h>
|
|
+#include <signal.h>
|
|
+#include <time.h>
|
|
+#include <string.h>
|
|
+#include "db_api.h"
|
|
+#include "db_postgres.h"
|
|
+#include "dl.h"
|
|
+
|
|
+#ifdef HAVE_POSTGRES
|
|
+#include <postgresql/libpq-fe.h>
|
|
+#endif
|
|
+
|
|
+#if (!defined HAVE_POSTGRES)
|
|
+int db_postgres_open(const char *file, db_conn_p *conn_p)
|
|
+{
|
|
+ return DB_ERROR;
|
|
+}
|
|
+#else
|
|
+
|
|
+static struct db_postgres_fns {
|
|
+ PGconn *(*PQconnectdb_fn)(char *);
|
|
+ ConnStatusType (*PQstatus_fn)(const PGconn *);
|
|
+ char *(*PQerrorMessage_fn)(const PGconn *);
|
|
+ void (*PQfinish_fn)(PGconn *);
|
|
+ PGresult *(*PQexec_fn)(PGconn *, const char *);
|
|
+ ExecStatusType (*PQresultStatus_fn)(const PGresult *);
|
|
+ char *(*PQresultErrorMessage_fn)(const PGresult *);
|
|
+ void (*PQclear_fn)(PGresult *);
|
|
+ int (*PQntuples_fn)(const PGresult *);
|
|
+ char *(*PQgetvalue_fn)(const PGresult *, int, int);
|
|
+} f;
|
|
+
|
|
+static void *dl_handle;
|
|
+
|
|
+
|
|
+static void db_postgres_dlopen(void)
|
|
+{
|
|
+ csync_debug(1, "Opening shared library libpq.so\n");
|
|
+
|
|
+ dl_handle = dlopen("libpq.so", RTLD_LAZY);
|
|
+ if (dl_handle == NULL) {
|
|
+		csync_fatal("Could not open libpq.so: %s\nPlease install the PostgreSQL client library (libpq) or use another database (sqlite, mysql)\n", dlerror());
+ }
|
|
+ csync_debug(1, "Reading symbols from shared library libpq.so\n");
|
|
+
|
|
+ LOOKUP_SYMBOL(dl_handle, PQconnectdb);
|
|
+ LOOKUP_SYMBOL(dl_handle, PQstatus);
|
|
+ LOOKUP_SYMBOL(dl_handle, PQerrorMessage);
|
|
+ LOOKUP_SYMBOL(dl_handle, PQfinish);
|
|
+ LOOKUP_SYMBOL(dl_handle, PQexec);
|
|
+ LOOKUP_SYMBOL(dl_handle, PQresultStatus);
|
|
+ LOOKUP_SYMBOL(dl_handle, PQresultErrorMessage);
|
|
+ LOOKUP_SYMBOL(dl_handle, PQclear);
|
|
+ LOOKUP_SYMBOL(dl_handle, PQntuples);
|
|
+ LOOKUP_SYMBOL(dl_handle, PQgetvalue);
|
|
+}
|
|
+
|
|
+
|
|
+
|
|
+/* This function parses a URL string like pgsql://[user[:passwd]@]hostname[:port]/database.
+ and returns the result in the given parameters.
|
|
+
|
|
+ If an optional keyword is not given, the value of the parameter is not changed.
|
|
+*/
|
|
+
|
|
+static int db_pgsql_parse_url(char *url, char **host, char **user, char **pass, char **database, unsigned int *port)
|
|
+{
|
|
+ char *pos = strchr(url, '@');
|
|
+ if (pos) {
|
|
+ *(pos) = 0;
|
|
+ *(user) = url;
|
|
+ url = pos + 1;
|
|
+
|
|
+ pos = strchr(*user, ':');
|
|
+ if (pos) {
|
|
+ *(pos) = 0;
|
|
+ *(pass) = (pos +1);
|
|
+ }
|
|
+ }
|
|
+ *host = url;
|
|
+ pos = strchr(*host, '/');
|
|
+ if (pos) {
|
|
+ // Database
|
|
+ (*pos) = 0;
|
|
+ *database = pos+1;
|
|
+ }
|
|
+ pos = strchr(*host, ':');
|
|
+ if (pos) {
|
|
+ (*pos) = 0;
|
|
+ *port = atoi(pos+1);
|
|
+ }
|
|
+ return DB_OK;
|
|
+}
|
|
+
|
|
+int db_postgres_open(const char *file, db_conn_p *conn_p)
|
|
+{
|
|
+ PGconn *pg_conn;
|
|
+ char *host, *user, *pass, *database;
|
|
+ unsigned int port = 5432; /* default postgres port */
|
|
+ char *db_url = malloc(strlen(file)+1);
|
|
+ char *create_database_statement;
|
|
+ char *pg_conn_info;
|
|
+
|
|
+ db_postgres_dlopen();
|
|
+
|
|
+ if (db_url == NULL)
|
|
+ csync_fatal("No memory for db_url\n");
|
|
+
|
|
+ user = "postgres";
|
|
+ pass = "";
|
|
+ host = "localhost";
|
|
+ database = "csync2";
|
|
+
|
|
+ strcpy(db_url, file);
|
|
+ int rc = db_pgsql_parse_url(db_url, &host, &user, &pass, &database, &port);
|
|
+ if (rc != DB_OK)
|
|
+ return rc;
|
|
+
|
|
+ ASPRINTF(&pg_conn_info, "host='%s' user='%s' password='%s' dbname='%s' port=%d",
|
|
+ host, user, pass, database, port);
|
|
+
|
|
+ pg_conn = f.PQconnectdb_fn(pg_conn_info);
|
|
+ if (pg_conn == NULL)
|
|
+		csync_fatal("No memory for postgres connection handle\n");
+
|
|
+ if (f.PQstatus_fn(pg_conn) != CONNECTION_OK) {
|
|
+ f.PQfinish_fn(pg_conn);
|
|
+ free(pg_conn_info);
|
|
+
|
|
+ ASPRINTF(&pg_conn_info, "host='%s' user='%s' password='%s' dbname='postgres' port=%d",
|
|
+ host, user, pass, port);
|
|
+
|
|
+ pg_conn = f.PQconnectdb_fn(pg_conn_info);
|
|
+ if (pg_conn == NULL)
|
|
+			csync_fatal("No memory for postgres connection handle\n");
+
|
|
+ if (f.PQstatus_fn(pg_conn) != CONNECTION_OK) {
|
|
+ csync_debug(0, "Connection failed: %s", f.PQerrorMessage_fn(pg_conn));
|
|
+ f.PQfinish_fn(pg_conn);
|
|
+ free(pg_conn_info);
|
|
+ return DB_ERROR;
|
|
+ } else {
|
|
+ char *create_database_statement;
|
|
+ PGresult *res;
|
|
+
|
|
+ csync_debug(1, "Database %s not found, trying to create it ...", database);
|
|
+ ASPRINTF(&create_database_statement, "create database %s", database);
|
|
+ res = f.PQexec_fn(pg_conn, create_database_statement);
|
|
+
|
|
+ free(create_database_statement);
|
|
+
|
|
+ switch (f.PQresultStatus_fn(res)) {
|
|
+ case PGRES_COMMAND_OK:
|
|
+ case PGRES_TUPLES_OK:
|
|
+ break;
|
|
+
|
|
+ default:
|
|
+ csync_debug(0, "Could not create database %s: %s", database, f.PQerrorMessage_fn(pg_conn));
|
|
+ return DB_ERROR;
|
|
+ }
|
|
+
|
|
+ f.PQfinish_fn(pg_conn);
|
|
+ free(pg_conn_info);
|
|
+
|
|
+ ASPRINTF(&pg_conn_info, "host='%s' user='%s' password='%s' dbname='%s' port=%d",
|
|
+ host, user, pass, database, port);
|
|
+
|
|
+ pg_conn = f.PQconnectdb_fn(pg_conn_info);
|
|
+ if (pg_conn == NULL)
|
|
+				csync_fatal("No memory for postgres connection handle\n");
+
|
|
+ if (f.PQstatus_fn(pg_conn) != CONNECTION_OK) {
|
|
+ csync_debug(0, "Connection failed: %s", f.PQerrorMessage_fn(pg_conn));
|
|
+ f.PQfinish_fn(pg_conn);
|
|
+ free(pg_conn_info);
|
|
+ return DB_ERROR;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ db_conn_p conn = calloc(1, sizeof(*conn));
|
|
+
|
|
+ if (conn == NULL)
|
|
+ csync_fatal("No memory for conn\n");
|
|
+
|
|
+ *conn_p = conn;
|
|
+ conn->private = pg_conn;
|
|
+ conn->close = db_postgres_close;
|
|
+ conn->exec = db_postgres_exec;
|
|
+ conn->errmsg = db_postgres_errmsg;
|
|
+ conn->prepare = db_postgres_prepare;
|
|
+ conn->upgrade_to_schema = db_postgres_upgrade_to_schema;
|
|
+
|
|
+ free(pg_conn_info);
|
|
+
|
|
+ return DB_OK;
|
|
+}
|
|
+
|
|
+
|
|
+void db_postgres_close(db_conn_p conn)
|
|
+{
|
|
+ if (!conn)
|
|
+ return;
|
|
+ if (!conn->private)
|
|
+ return;
|
|
+ f.PQfinish_fn(conn->private);
|
|
+ conn->private = 0;
|
|
+}
|
|
+
|
|
+const char *db_postgres_errmsg(db_conn_p conn)
|
|
+{
|
|
+ if (!conn)
|
|
+ return "(no connection)";
|
|
+ if (!conn->private)
|
|
+ return "(no private data in conn)";
|
|
+ return f.PQerrorMessage_fn(conn->private);
|
|
+}
|
|
+
|
|
+
|
|
+int db_postgres_exec(db_conn_p conn, const char *sql)
|
|
+{
|
|
+ PGresult *res;
|
|
+
|
|
+ if (!conn)
|
|
+ return DB_NO_CONNECTION;
|
|
+
|
|
+ if (!conn->private) {
|
|
+ /* added error element */
|
|
+ return DB_NO_CONNECTION_REAL;
|
|
+ }
|
|
+ res = f.PQexec_fn(conn->private, sql);
|
|
+ switch (f.PQresultStatus_fn(res)) {
|
|
+ case PGRES_COMMAND_OK:
|
|
+ case PGRES_TUPLES_OK:
|
|
+ return DB_OK;
|
|
+
|
|
+ default:
|
|
+ return DB_ERROR;
|
|
+ }
|
|
+}
|
|
+
|
|
+
|
|
+int db_postgres_prepare(db_conn_p conn, const char *sql, db_stmt_p *stmt_p,
|
|
+ char **pptail)
|
|
+{
|
|
+ PGresult *result;
|
|
+ int *row_p;
|
|
+
|
|
+ *stmt_p = NULL;
|
|
+
|
|
+ if (!conn)
|
|
+ return DB_NO_CONNECTION;
|
|
+
|
|
+ if (!conn->private) {
|
|
+ /* added error element */
|
|
+ return DB_NO_CONNECTION_REAL;
|
|
+ }
|
|
+ result = f.PQexec_fn(conn->private, sql);
|
|
+
|
|
+ if (result == NULL)
|
|
+ csync_fatal("No memory for result\n");
|
|
+
|
|
+ switch (f.PQresultStatus_fn(result)) {
|
|
+ case PGRES_COMMAND_OK:
|
|
+ case PGRES_TUPLES_OK:
|
|
+ break;
|
|
+
|
|
+ default:
|
|
+ csync_debug(1, "Error in PQexec: %s", f.PQresultErrorMessage_fn(result));
|
|
+ f.PQclear_fn(result);
|
|
+ return DB_ERROR;
|
|
+ }
|
|
+
|
|
+ row_p = malloc(sizeof(*row_p));
|
|
+ if (row_p == NULL)
|
|
+ csync_fatal("No memory for row\n");
|
|
+ *row_p = -1;
|
|
+
|
|
+ db_stmt_p stmt = malloc(sizeof(*stmt));
|
|
+ if (stmt == NULL)
|
|
+ csync_fatal("No memory for stmt\n");
|
|
+
|
|
+ stmt->private = result;
|
|
+ stmt->private2 = row_p;
|
|
+
|
|
+ *stmt_p = stmt;
|
|
+ stmt->get_column_text = db_postgres_stmt_get_column_text;
|
|
+ stmt->get_column_blob = db_postgres_stmt_get_column_blob;
|
|
+ stmt->get_column_int = db_postgres_stmt_get_column_int;
|
|
+ stmt->next = db_postgres_stmt_next;
|
|
+ stmt->close = db_postgres_stmt_close;
|
|
+ stmt->db = conn;
|
|
+ return DB_OK;
|
|
+}
|
|
+
|
|
+
|
|
+const void* db_postgres_stmt_get_column_blob(db_stmt_p stmt, int column)
|
|
+{
|
|
+ PGresult *result;
|
|
+ int *row_p;
|
|
+
|
|
+ if (!stmt || !stmt->private || !stmt->private2) {
|
|
+ return 0;
|
|
+ }
|
|
+ result = (PGresult*)stmt->private;
|
|
+ row_p = (int*)stmt->private2;
|
|
+
|
|
+ if (*row_p >= f.PQntuples_fn(result) || *row_p < 0) {
|
|
+		csync_debug(1, "row index out of range (should be between 0 and %d, is %d)\n",
+			f.PQntuples_fn(result), *row_p);
+ return NULL;
|
|
+ }
|
|
+ return f.PQgetvalue_fn(result, *row_p, column);
|
|
+}
|
|
+
|
|
+const char *db_postgres_stmt_get_column_text(db_stmt_p stmt, int column)
|
|
+{
|
|
+ PGresult *result;
|
|
+ int *row_p;
|
|
+
|
|
+ if (!stmt || !stmt->private || !stmt->private2) {
|
|
+ return 0;
|
|
+ }
|
|
+ result = (PGresult*)stmt->private;
|
|
+ row_p = (int*)stmt->private2;
|
|
+
|
|
+ if (*row_p >= f.PQntuples_fn(result) || *row_p < 0) {
|
|
+		csync_debug(1, "row index out of range (should be between 0 and %d, is %d)\n",
+			f.PQntuples_fn(result), *row_p);
+ return NULL;
|
|
+ }
|
|
+ return f.PQgetvalue_fn(result, *row_p, column);
|
|
+}
|
|
+
|
|
+int db_postgres_stmt_get_column_int(db_stmt_p stmt, int column)
|
|
+{
|
|
+ PGresult *result;
|
|
+ int *row_p;
|
|
+
|
|
+ if (!stmt || !stmt->private || !stmt->private2) {
|
|
+ return 0;
|
|
+ }
|
|
+ result = (PGresult*)stmt->private;
|
|
+ row_p = (int*)stmt->private2;
|
|
+
|
|
+ if (*row_p >= f.PQntuples_fn(result) || *row_p < 0) {
|
|
+		csync_debug(1, "row index out of range (should be between 0 and %d, is %d)\n",
+			f.PQntuples_fn(result), *row_p);
+ return 0;
|
|
+ }
|
|
+ return atoi(f.PQgetvalue_fn(result, *row_p, column));
|
|
+}
|
|
+
|
|
+
|
|
+int db_postgres_stmt_next(db_stmt_p stmt)
|
|
+{
|
|
+ PGresult *result;
|
|
+ int *row_p;
|
|
+
|
|
+ if (!stmt || !stmt->private || !stmt->private2) {
|
|
+ return 0;
|
|
+ }
|
|
+ result = (PGresult*)stmt->private;
|
|
+ row_p = (int*)stmt->private2;
|
|
+
|
|
+ (*row_p)++;
|
|
+ if (*row_p >= f.PQntuples_fn(result))
|
|
+ return DB_DONE;
|
|
+
|
|
+ return DB_ROW;
|
|
+}
|
|
+
|
|
+int db_postgres_stmt_close(db_stmt_p stmt)
|
|
+{
|
|
+ PGresult *res = stmt->private;
|
|
+
|
|
+ f.PQclear_fn(res);
|
|
+ free(stmt->private2);
|
|
+ free(stmt);
|
|
+ return DB_OK;
|
|
+}
|
|
+
|
|
+
|
|
+int db_postgres_upgrade_to_schema(int version)
|
|
+{
|
|
+ if (version < 0)
|
|
+ return DB_OK;
|
|
+
|
|
+ if (version > 0)
|
|
+ return DB_ERROR;
|
|
+
|
|
+ csync_debug(2, "Upgrading database schema to version %d.\n", version);
|
|
+
|
|
+ csync_db_sql("Creating action table",
|
|
+"CREATE TABLE action ("
|
|
+" filename varchar(255) DEFAULT NULL,"
|
|
+" command text,"
|
|
+" logfile text,"
|
|
+" UNIQUE (filename,command)"
|
|
+");");
|
|
+
|
|
+ csync_db_sql("Creating dirty table",
|
|
+"CREATE TABLE dirty ("
|
|
+" filename varchar(200) DEFAULT NULL,"
|
|
+" forced int DEFAULT NULL,"
|
|
+" myname varchar(100) DEFAULT NULL,"
|
|
+" peername varchar(100) DEFAULT NULL,"
|
|
+" UNIQUE (filename,peername)"
|
|
+");");
|
|
+
|
|
+ csync_db_sql("Creating file table",
|
|
+"CREATE TABLE file ("
|
|
+" filename varchar(200) DEFAULT NULL,"
|
|
+" checktxt varchar(200) DEFAULT NULL,"
|
|
+" UNIQUE (filename)"
|
|
+");");
|
|
+
|
|
+ csync_db_sql("Creating hint table",
|
|
+"CREATE TABLE hint ("
|
|
+" filename varchar(255) DEFAULT NULL,"
|
|
+" recursive int DEFAULT NULL"
|
|
+");");
|
|
+
|
|
+ csync_db_sql("Creating x509_cert table",
|
|
+"CREATE TABLE x509_cert ("
|
|
+" peername varchar(255) DEFAULT NULL,"
|
|
+" certdata varchar(255) DEFAULT NULL,"
|
|
+" UNIQUE (peername)"
|
|
+");");
|
|
+
|
|
+ return DB_OK;
|
|
+}
|
|
+
|
|
+
|
|
+#endif /* HAVE_POSTGRES */
|
|
diff --git a/db_postgres.h b/db_postgres.h
new file mode 100644
index 0000000..949439e
--- /dev/null
+++ b/db_postgres.h
@@ -0,0 +1,20 @@
+
+#ifndef DB_POSTGRES_H
+#define DB_POSTGRES_H
+
+/* public */
+int db_postgres_open(const char *file, db_conn_p *conn_p);
+/* Private */
+void db_postgres_close(db_conn_p db_conn);
+int db_postgres_exec(db_conn_p conn, const char *sql);
+int db_postgres_prepare(db_conn_p conn, const char *sql, db_stmt_p *stmt_p, char **pptail);
+const char *db_postgres_errmsg(db_conn_p db_conn);
+
+int db_postgres_stmt_next(db_stmt_p stmt);
+const void* db_postgres_stmt_get_column_blob(db_stmt_p stmt, int column);
+const char *db_postgres_stmt_get_column_text(db_stmt_p stmt, int column);
+int db_postgres_stmt_get_column_int(db_stmt_p stmt, int column);
+int db_postgres_stmt_close(db_stmt_p stmt);
+int db_postgres_upgrade_to_schema(int version);
+
+#endif
diff --git a/db_sqlite.c b/db_sqlite.c
|
|
new file mode 100644
|
|
index 0000000..81c5c75
|
|
--- /dev/null
|
|
+++ b/db_sqlite.c
|
|
@@ -0,0 +1,263 @@
|
|
+/*
|
|
+ * Copyright (C) 2010 Dennis Schafroth <dennis@schafroth.com>
+ * Copyright (C) 2010 Johannes Thoma <johannes.thoma@gmx.at>
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify
|
|
+ * it under the terms of the GNU General Public License as published by
|
|
+ * the Free Software Foundation; either version 2 of the License, or
|
|
+ * (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
+ */
|
|
+
|
|
+#include "csync2.h"
|
|
+#if defined(HAVE_SQLITE3)
|
|
+#include <sqlite3.h>
|
|
+#endif
|
|
+#include <stdio.h>
|
|
+#include <stdarg.h>
|
|
+#include <stdlib.h>
|
|
+#include <unistd.h>
|
|
+#include <signal.h>
|
|
+#include <time.h>
|
|
+#include "db_api.h"
|
|
+#include "db_sqlite.h"
|
|
+#include "dl.h"
|
|
+
|
|
+#ifndef HAVE_SQLITE3
|
|
+int db_sqlite_open(const char *file, db_conn_p *conn_p) {
|
|
+ return DB_ERROR;
|
|
+}
|
|
+#else
|
|
+
|
|
+static struct db_sqlite3_fns {
|
|
+ int (*sqlite3_open_fn) (const char*, sqlite3 **);
|
|
+ int (*sqlite3_close_fn) (sqlite3 *);
|
|
+ const char *(*sqlite3_errmsg_fn) (sqlite3 *);
|
|
+ int (*sqlite3_exec_fn) (sqlite3*, const char *,
|
|
+ int (*) (void*,int,char**,char**), void*, char **);
|
|
+ int (*sqlite3_prepare_v2_fn)(sqlite3 *, const char *, int,
|
|
+ sqlite3_stmt **, const char **pzTail);
|
|
+ const unsigned char *(*sqlite3_column_text_fn)(sqlite3_stmt*, int);
|
|
+ const void *(*sqlite3_column_blob_fn)(sqlite3_stmt*, int);
|
|
+ int (*sqlite3_column_int_fn)(sqlite3_stmt*, int);
|
|
+ int (*sqlite3_step_fn)(sqlite3_stmt*);
|
|
+ int (*sqlite3_finalize_fn)(sqlite3_stmt *);
|
|
+} f;
|
|
+
|
|
+static void *dl_handle;
|
|
+
|
|
+
|
|
+static void db_sqlite3_dlopen(void)
|
|
+{
|
|
+ csync_debug(1, "Opening shared library libsqlite3.so\n");
|
|
+
|
|
+ dl_handle = dlopen("libsqlite3.so", RTLD_LAZY);
|
|
+ if (dl_handle == NULL) {
|
|
+		csync_fatal("Could not open libsqlite3.so: %s\nPlease install the SQLite3 client library (libsqlite3) or use another database (postgres, mysql)\n", dlerror());
+ }
|
|
+ csync_debug(1, "Reading symbols from shared library libsqlite3.so\n");
|
|
+
|
|
+ LOOKUP_SYMBOL(dl_handle, sqlite3_open);
|
|
+ LOOKUP_SYMBOL(dl_handle, sqlite3_close);
|
|
+ LOOKUP_SYMBOL(dl_handle, sqlite3_errmsg);
|
|
+ LOOKUP_SYMBOL(dl_handle, sqlite3_exec);
|
|
+ LOOKUP_SYMBOL(dl_handle, sqlite3_prepare_v2);
|
|
+ LOOKUP_SYMBOL(dl_handle, sqlite3_column_text);
|
|
+ LOOKUP_SYMBOL(dl_handle, sqlite3_column_blob);
|
|
+ LOOKUP_SYMBOL(dl_handle, sqlite3_column_int);
|
|
+ LOOKUP_SYMBOL(dl_handle, sqlite3_step);
|
|
+ LOOKUP_SYMBOL(dl_handle, sqlite3_finalize);
|
|
+}
|
|
+
|
|
+static int sqlite_errors[] = { SQLITE_OK, SQLITE_ERROR, SQLITE_BUSY, SQLITE_ROW, SQLITE_DONE, -1 };
|
|
+static int db_errors[] = { DB_OK, DB_ERROR, DB_BUSY, DB_ROW, DB_DONE, -1 };
|
|
+
|
|
+int db_sqlite_error_map(int sqlite_err) {
|
|
+ int index;
|
|
+ for (index = 0; ; index++) {
|
|
+ if (sqlite_errors[index] == -1)
|
|
+ return DB_ERROR;
|
|
+ if (sqlite_err == sqlite_errors[index])
|
|
+ return db_errors[index];
|
|
+ }
|
|
+}
|
|
+
|
|
+int db_sqlite_open(const char *file, db_conn_p *conn_p)
|
|
+{
|
|
+ sqlite3 *db;
|
|
+
|
|
+ db_sqlite3_dlopen();
|
|
+
|
|
+ int rc = f.sqlite3_open_fn(file, &db);
|
|
+ if ( rc != SQLITE_OK ) {
|
|
+ return db_sqlite_error_map(rc);
|
|
+ };
|
|
+ db_conn_p conn = calloc(1, sizeof(*conn));
|
|
+ if (conn == NULL) {
|
|
+ return DB_ERROR;
|
|
+ }
|
|
+ *conn_p = conn;
|
|
+ conn->private = db;
|
|
+ conn->close = db_sqlite_close;
|
|
+ conn->exec = db_sqlite_exec;
|
|
+ conn->prepare = db_sqlite_prepare;
|
|
+ conn->errmsg = db_sqlite_errmsg;
|
|
+ conn->upgrade_to_schema = db_sqlite_upgrade_to_schema;
|
|
+ return db_sqlite_error_map(rc);
|
|
+}
|
|
+
|
|
+void db_sqlite_close(db_conn_p conn)
|
|
+{
|
|
+ if (!conn)
|
|
+ return;
|
|
+ if (!conn->private)
|
|
+ return;
|
|
+ f.sqlite3_close_fn(conn->private);
|
|
+ conn->private = 0;
|
|
+}
|
|
+
|
|
+const char *db_sqlite_errmsg(db_conn_p conn)
|
|
+{
|
|
+ if (!conn)
|
|
+ return "(no connection)";
|
|
+ if (!conn->private)
|
|
+ return "(no private data in conn)";
|
|
+ return f.sqlite3_errmsg_fn(conn->private);
|
|
+}
|
|
+
|
|
+int db_sqlite_exec(db_conn_p conn, const char *sql) {
|
|
+ int rc;
|
|
+ if (!conn)
|
|
+ return DB_NO_CONNECTION;
|
|
+
|
|
+ if (!conn->private) {
|
|
+ /* added error element */
|
|
+ return DB_NO_CONNECTION_REAL;
|
|
+ }
|
|
+ rc = f.sqlite3_exec_fn(conn->private, sql, 0, 0, 0);
|
|
+ return db_sqlite_error_map(rc);
|
|
+}
|
|
+
|
|
+int db_sqlite_prepare(db_conn_p conn, const char *sql, db_stmt_p *stmt_p, char **pptail) {
|
|
+ int rc;
|
|
+
|
|
+ *stmt_p = NULL;
|
|
+
|
|
+ if (!conn)
|
|
+ return DB_NO_CONNECTION;
|
|
+
|
|
+ if (!conn->private) {
|
|
+ /* added error element */
|
|
+ return DB_NO_CONNECTION_REAL;
|
|
+ }
|
|
+ db_stmt_p stmt = malloc(sizeof(*stmt));
|
|
+ sqlite3_stmt *sqlite_stmt = 0;
|
|
+ /* TODO avoid strlen, use configurable limit? */
|
|
+ rc = f.sqlite3_prepare_v2_fn(conn->private, sql, strlen(sql), &sqlite_stmt, (const char **) pptail);
|
|
+ if (rc != SQLITE_OK)
|
|
+ return db_sqlite_error_map(rc);
|
|
+ stmt->private = sqlite_stmt;
|
|
+ *stmt_p = stmt;
|
|
+ stmt->get_column_text = db_sqlite_stmt_get_column_text;
|
|
+ stmt->get_column_blob = db_sqlite_stmt_get_column_blob;
|
|
+ stmt->get_column_int = db_sqlite_stmt_get_column_int;
|
|
+ stmt->next = db_sqlite_stmt_next;
|
|
+ stmt->close = db_sqlite_stmt_close;
|
|
+ stmt->db = conn;
|
|
+ return db_sqlite_error_map(rc);
|
|
+}
|
|
+
|
|
+const char *db_sqlite_stmt_get_column_text(db_stmt_p stmt, int column) {
|
|
+ if (!stmt || !stmt->private) {
|
|
+ return 0;
|
|
+ }
|
|
+ sqlite3_stmt *sqlite_stmt = stmt->private;
|
|
+ const unsigned char *result = f.sqlite3_column_text_fn(sqlite_stmt, column);
|
|
+ /* error handling */
|
|
+ return (const char*)result;
|
|
+}
|
|
+
|
|
+#if defined(HAVE_SQLITE3)
|
|
+const void* db_sqlite_stmt_get_column_blob(db_stmt_p stmtx, int col) {
|
|
+ sqlite3_stmt *stmt = stmtx->private;
|
|
+ return f.sqlite3_column_blob_fn(stmt,col);
|
|
+}
|
|
+#endif
|
|
+
|
|
+
|
|
+
|
|
+int db_sqlite_stmt_get_column_int(db_stmt_p stmt, int column) {
|
|
+ sqlite3_stmt *sqlite_stmt = stmt->private;
|
|
+  int value = f.sqlite3_column_int_fn(sqlite_stmt, column);
+  return value;
+}
|
|
+
|
|
+
|
|
+int db_sqlite_stmt_next(db_stmt_p stmt)
|
|
+{
|
|
+ sqlite3_stmt *sqlite_stmt = stmt->private;
|
|
+ int rc = f.sqlite3_step_fn(sqlite_stmt);
|
|
+ return db_sqlite_error_map(rc);
|
|
+}
|
|
+
|
|
+int db_sqlite_stmt_close(db_stmt_p stmt)
|
|
+{
|
|
+ sqlite3_stmt *sqlite_stmt = stmt->private;
|
|
+ int rc = f.sqlite3_finalize_fn(sqlite_stmt);
|
|
+ free(stmt);
|
|
+ return db_sqlite_error_map(rc);
|
|
+}
|
|
+
|
|
+
|
|
+int db_sqlite_upgrade_to_schema(int version)
|
|
+{
|
|
+ if (version < 0)
|
|
+ return DB_OK;
|
|
+
|
|
+ if (version > 0)
|
|
+ return DB_ERROR;
|
|
+
|
|
+ csync_debug(2, "Upgrading database schema to version %d.\n", version);
|
|
+
|
|
+ csync_db_sql("Creating file table",
|
|
+ "CREATE TABLE file ("
|
|
+ " filename, checktxt,"
|
|
+ " UNIQUE ( filename ) ON CONFLICT REPLACE"
|
|
+ ")");
|
|
+
|
|
+ csync_db_sql("Creating dirty table",
|
|
+ "CREATE TABLE dirty ("
|
|
+ " filename, forced, myname, peername,"
|
|
+ " UNIQUE ( filename, peername ) ON CONFLICT IGNORE"
|
|
+ ")");
|
|
+
|
|
+ csync_db_sql("Creating hint table",
|
|
+ "CREATE TABLE hint ("
|
|
+ " filename, recursive,"
|
|
+ " UNIQUE ( filename, recursive ) ON CONFLICT IGNORE"
|
|
+ ")");
|
|
+
|
|
+ csync_db_sql("Creating action table",
|
|
+ "CREATE TABLE action ("
|
|
+ " filename, command, logfile,"
|
|
+ " UNIQUE ( filename, command ) ON CONFLICT IGNORE"
|
|
+ ")");
|
|
+
|
|
+ csync_db_sql("Creating x509_cert table",
|
|
+ "CREATE TABLE x509_cert ("
|
|
+ " peername, certdata,"
|
|
+ " UNIQUE ( peername ) ON CONFLICT IGNORE"
|
|
+ ")");
|
|
+
|
|
+ return DB_OK;
|
|
+}
|
|
+
|
|
+#endif
|
|
diff --git a/db_sqlite.h b/db_sqlite.h
new file mode 100644
index 0000000..f5e2340
--- /dev/null
+++ b/db_sqlite.h
@@ -0,0 +1,19 @@
+
+#ifndef DB_SQLITE_H
+#define DB_SQLITE_H
+
+/* public */
+int db_sqlite_open(const char *file, db_conn_p *conn_p);
+/* Private */
+void db_sqlite_close(db_conn_p db_conn);
+int db_sqlite_exec(db_conn_p conn, const char *sql);
+int db_sqlite_prepare(db_conn_p conn, const char *sql, db_stmt_p *stmt_p, char **pptail);
+int db_sqlite_stmt_next(db_stmt_p stmt);
+const char* db_sqlite_stmt_get_column_text(db_stmt_p stmt, int column);
+const void* db_sqlite_stmt_get_column_blob(db_stmt_p stmt, int column);
+int db_sqlite_stmt_get_column_int(db_stmt_p stmt, int column);
+int db_sqlite_stmt_close(db_stmt_p stmt);
+const char *db_sqlite_errmsg(db_conn_p conn);
+int db_sqlite_upgrade_to_schema(int version);
+
+#endif
diff --git a/db_sqlite2.c b/db_sqlite2.c
|
|
new file mode 100644
|
|
index 0000000..8b2c85e
|
|
--- /dev/null
|
|
+++ b/db_sqlite2.c
|
|
@@ -0,0 +1,257 @@
|
|
+/*
|
|
+ * Copyright (C) 2010 Dennis Schafroth <dennis@schafroth.com>
+ * Copyright (C) 2010 Johannes Thoma <johannes.thoma@gmx.at>
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify
|
|
+ * it under the terms of the GNU General Public License as published by
|
|
+ * the Free Software Foundation; either version 2 of the License, or
|
|
+ * (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
+ */
|
|
+
|
|
+#include "db_api.h"
|
|
+#include "config.h"
|
|
+
|
|
+#ifndef HAVE_SQLITE
|
|
+/* dummy function to implement a open that fails */
|
|
+int db_sqlite2_open(const char *file, db_conn_p *conn_p) {
|
|
+ return DB_ERROR;
|
|
+}
|
|
+#else
|
|
+
|
|
+#include <sqlite.h>
|
|
+#include <stdio.h>
|
|
+#include <stdarg.h>
|
|
+#include <stdlib.h>
|
|
+#include <unistd.h>
|
|
+#include <signal.h>
|
|
+#include <time.h>
|
|
+#include "db_sqlite2.h"
|
|
+#include <dl.h>
|
|
+
|
|
+
|
|
+static struct db_sqlite_fns {
|
|
+ sqlite *(*sqlite_open_fn)(const char *, int, char**);
|
|
+ void (*sqlite_close_fn)(sqlite *);
|
|
+ int (*sqlite_exec_fn)(sqlite *, char *, int (*)(void*,int,char**,char**), void *, char **);
|
|
+ int (*sqlite_compile_fn)(sqlite *, const char *, const char **, sqlite_vm **, char **);
|
|
+ int (*sqlite_step_fn)(sqlite_vm *, int *, const char ***, const char ***);
|
|
+ int (*sqlite_finalize_fn)(sqlite_vm *, char **);
|
|
+} f;
|
|
+
|
|
+static char *errmsg;
|
|
+
|
|
+static void *dl_handle;
|
|
+
|
|
+
|
|
+static void db_sqlite_dlopen(void)
|
|
+{
|
|
+ csync_debug(1, "Opening shared library libsqlite.so\n");
|
|
+
|
|
+ dl_handle = dlopen("libsqlite.so", RTLD_LAZY);
|
|
+ if (dl_handle == NULL) {
|
|
+ csync_debug(1, "Libsqlite.so not found, trying libsqlite.so.0\n");
|
|
+ dl_handle = dlopen("libsqlite.so.0", RTLD_LAZY);
|
|
+ if (dl_handle == NULL) {
|
|
+			csync_fatal("Could not open libsqlite.so: %s\nPlease install the SQLite client library (libsqlite) or use another database (postgres, mysql)\n", dlerror());
+ }
|
|
+ }
|
|
+	csync_debug(1, "Reading symbols from shared library libsqlite.so\n");
+
|
|
+ LOOKUP_SYMBOL(dl_handle, sqlite_open);
|
|
+ LOOKUP_SYMBOL(dl_handle, sqlite_close);
|
|
+ LOOKUP_SYMBOL(dl_handle, sqlite_exec);
|
|
+ LOOKUP_SYMBOL(dl_handle, sqlite_compile);
|
|
+ LOOKUP_SYMBOL(dl_handle, sqlite_step);
|
|
+ LOOKUP_SYMBOL(dl_handle, sqlite_finalize);
|
|
+
|
|
+}
|
|
+
|
|
+
|
|
+int db_sqlite2_open(const char *file, db_conn_p *conn_p)
|
|
+{
|
|
+ db_sqlite_dlopen();
|
|
+
|
|
+ sqlite *db = f.sqlite_open_fn(file, 0, &errmsg);
|
|
+ if ( db == 0 ) {
|
|
+ return DB_ERROR;
|
|
+ };
|
|
+ db_conn_p conn = calloc(1, sizeof(*conn));
|
|
+ if (conn == NULL) {
|
|
+ return DB_ERROR;
|
|
+ }
|
|
+ *conn_p = conn;
|
|
+ conn->private = db;
|
|
+ conn->close = db_sqlite2_close;
|
|
+ conn->exec = db_sqlite2_exec;
|
|
+ conn->prepare = db_sqlite2_prepare;
|
|
+ conn->errmsg = NULL;
|
|
+ conn->upgrade_to_schema = db_sqlite2_upgrade_to_schema;
|
|
+ return DB_OK;
|
|
+}
|
|
+
|
|
+void db_sqlite2_close(db_conn_p conn)
|
|
+{
|
|
+ if (!conn)
|
|
+ return;
|
|
+ if (!conn->private)
|
|
+ return;
|
|
+ f.sqlite_close_fn(conn->private);
|
|
+ conn->private = 0;
|
|
+}
|
|
+
|
|
+const char *db_sqlite2_errmsg(db_conn_p conn)
|
|
+{
|
|
+ if (!conn)
|
|
+ return "(no connection)";
|
|
+ if (!conn->private)
|
|
+ return "(no private data in conn)";
|
|
+ return errmsg;
|
|
+}
|
|
+
|
|
+int db_sqlite2_exec(db_conn_p conn, const char *sql) {
|
|
+ int rc;
|
|
+ if (!conn)
|
|
+ return DB_NO_CONNECTION;
|
|
+
|
|
+ if (!conn->private) {
|
|
+ /* added error element */
|
|
+ return DB_NO_CONNECTION_REAL;
|
|
+ }
|
|
+ rc = f.sqlite_exec_fn(conn->private, (char*) sql, 0, 0, &errmsg);
|
|
+ /* On error parse, create DB ERROR element */
|
|
+ return rc;
|
|
+}
|
|
+
|
|
+int db_sqlite2_prepare(db_conn_p conn, const char *sql, db_stmt_p *stmt_p, char **pptail) {
|
|
+ int rc;
|
|
+ sqlite *db;
|
|
+
|
|
+ *stmt_p = NULL;
|
|
+
|
|
+ if (!conn)
|
|
+ return DB_NO_CONNECTION;
|
|
+
|
|
+ if (!conn->private) {
|
|
+ /* added error element */
|
|
+ return DB_NO_CONNECTION_REAL;
|
|
+ }
|
|
+ db = conn->private;
|
|
+
|
|
+ db_stmt_p stmt = malloc(sizeof(*stmt));
|
|
+ sqlite_vm *sqlite_stmt = 0;
|
|
+ rc = f.sqlite_compile_fn(db, sql, 0, &sqlite_stmt, &errmsg);
|
|
+ if (rc != SQLITE_OK)
|
|
+ return 0;
|
|
+ stmt->private = sqlite_stmt;
|
|
+ *stmt_p = stmt;
|
|
+ stmt->get_column_text = db_sqlite2_stmt_get_column_text;
|
|
+ stmt->get_column_blob = db_sqlite2_stmt_get_column_blob;
|
|
+ stmt->get_column_int = db_sqlite2_stmt_get_column_int;
|
|
+ stmt->next = db_sqlite2_stmt_next;
|
|
+ stmt->close = db_sqlite2_stmt_close;
|
|
+ stmt->db = conn;
|
|
+ return DB_OK;
|
|
+}
|
|
+
|
|
+const char *db_sqlite2_stmt_get_column_text(db_stmt_p stmt, int column) {
|
|
+ if (!stmt || !stmt->private) {
|
|
+ return 0;
|
|
+ }
|
|
+ sqlite_vm *sqlite_stmt = stmt->private;
|
|
+ const char **values = stmt->private2;
|
|
+ return values[column];
|
|
+}
|
|
+
|
|
+const void* db_sqlite2_stmt_get_column_blob(db_stmt_p stmt, int col) {
|
|
+ return db_sqlite2_stmt_get_column_text(stmt, col);
|
|
+}
|
|
+
|
|
+int db_sqlite2_stmt_get_column_int(db_stmt_p stmt, int column) {
|
|
+ sqlite_vm *sqlite_stmt = stmt->private;
|
|
+ const char **values = stmt->private2;
|
|
+ const char *str_value = values[column];
|
|
+ int value = 0;
|
|
+  if (str_value)
+ value = atoi(str_value);
|
|
+ /* TODO missing way to return error */
|
|
+ return value;
|
|
+}
|
|
+
|
|
+
|
|
+int db_sqlite2_stmt_next(db_stmt_p stmt)
|
|
+{
|
|
+ sqlite_vm *sqlite_stmt = stmt->private;
|
|
+ const char **dataSQL_V, **dataSQL_N;
|
|
+ const char **values;
|
|
+ const char **names;
|
|
+ int columns;
|
|
+
|
|
+ int rc = f.sqlite_step_fn(sqlite_stmt, &columns, &values, &names);
|
|
+ stmt->private2 = values;
|
|
+ /* TODO error mapping */
|
|
+ return rc; // == SQLITE_ROW;
|
|
+}
|
|
+
|
|
+int db_sqlite2_stmt_close(db_stmt_p stmt)
|
|
+{
|
|
+ sqlite_vm *sqlite_stmt = stmt->private;
|
|
+ int rc = f.sqlite_finalize_fn(sqlite_stmt, &errmsg);
|
|
+ free(stmt);
|
|
+ return rc;
|
|
+}
|
|
+
|
|
+
|
|
+int db_sqlite2_upgrade_to_schema(int version)
|
|
+{
|
|
+ if (version < 0)
|
|
+ return DB_OK;
|
|
+
|
|
+ if (version > 0)
|
|
+ return DB_ERROR;
|
|
+
|
|
+ csync_debug(2, "Upgrading database schema to version %d.\n", version);
|
|
+
|
|
+ csync_db_sql("Creating file table",
|
|
+ "CREATE TABLE file ("
|
|
+ " filename, checktxt,"
|
|
+ " UNIQUE ( filename ) ON CONFLICT REPLACE"
|
|
+ ")");
|
|
+
|
|
+ csync_db_sql("Creating dirty table",
|
|
+ "CREATE TABLE dirty ("
|
|
+ " filename, forced, myname, peername,"
|
|
+ " UNIQUE ( filename, peername ) ON CONFLICT IGNORE"
|
|
+ ")");
|
|
+
|
|
+ csync_db_sql("Creating hint table",
|
|
+ "CREATE TABLE hint ("
|
|
+ " filename, recursive,"
|
|
+ " UNIQUE ( filename, recursive ) ON CONFLICT IGNORE"
|
|
+ ")");
|
|
+
|
|
+ csync_db_sql("Creating action table",
|
|
+ "CREATE TABLE action ("
|
|
+ " filename, command, logfile,"
|
|
+ " UNIQUE ( filename, command ) ON CONFLICT IGNORE"
|
|
+ ")");
|
|
+
|
|
+ csync_db_sql("Creating x509_cert table",
|
|
+ "CREATE TABLE x509_cert ("
|
|
+ " peername, certdata,"
|
|
+ " UNIQUE ( peername ) ON CONFLICT IGNORE"
|
|
+ ")");
|
|
+
|
|
+ return DB_OK;
|
|
+}
|
|
+
|
|
+
|
|
+#endif
|
|
diff --git a/db_sqlite2.h b/db_sqlite2.h
new file mode 100644
index 0000000..79336a4
--- /dev/null
+++ b/db_sqlite2.h
@@ -0,0 +1,18 @@
+
+#ifndef DB_SQLITE2_H
+#define DB_SQLITE2_H
+
+/* public */
+int db_sqlite2_open(const char *file, db_conn_p *conn_p);
+/* Private, should not be here */
+void db_sqlite2_close(db_conn_p db_conn);
+int db_sqlite2_exec(db_conn_p conn, const char *sql);
+int db_sqlite2_prepare(db_conn_p conn, const char *sql, db_stmt_p *stmt_p, char **pptail);
+int db_sqlite2_stmt_next(db_stmt_p stmt);
+const char* db_sqlite2_stmt_get_column_text(db_stmt_p stmt, int column);
+const void* db_sqlite2_stmt_get_column_blob(db_stmt_p stmt, int column);
+int db_sqlite2_stmt_get_column_int(db_stmt_p stmt, int column);
+int db_sqlite2_stmt_close(db_stmt_p stmt);
+int db_sqlite2_upgrade_to_schema(int version);
+
+#endif
diff --git a/dl.h b/dl.h
new file mode 100644
index 0000000..0769b2f
--- /dev/null
+++ b/dl.h
@@ -0,0 +1,12 @@
+#ifndef DL_H
+#define DL_H
+
+#include <dlfcn.h>
+
+#define LOOKUP_SYMBOL(dl_handle, sym) \
+	f.sym ## _fn = dlsym(dl_handle, #sym); \
+	if ((f.sym ## _fn) == NULL) { \
+		csync_fatal ("Could not lookup %s in shared library: %s\n", #sym, dlerror()); \
+	}
+
+#endif
diff --git a/doc/csync2_paper.tex b/doc/csync2_paper.tex
|
|
new file mode 100644
|
|
index 0000000..00f0de0
|
|
--- /dev/null
|
|
+++ b/doc/csync2_paper.tex
|
|
@@ -0,0 +1,910 @@
|
|
+\documentclass[a4paper,twocolumn]{article}
|
|
+\usepackage{nopageno}
|
|
+
|
|
+\usepackage{svn}
|
|
+\SVNdate $Date$
|
|
+
|
|
+\def\csync2{{\sc Csync$^{2}$}}
|
|
+
|
|
+\begin{document}
|
|
+
|
|
+\title{Cluster synchronization with \csync2}
|
|
+\author{Clifford Wolf, http://www.clifford.at/}
|
|
+\maketitle
|
|
+
|
|
+\section{Introduction}
|
|
+
|
|
+\csync2 [1] is a tool for asynchronous file synchronization in clusters.
|
|
+Asynchronous file synchronization is good for files which are seldom modified -
|
|
+such as configuration files or application images - but it is not adequate for
|
|
+some other types of data.
|
|
+
|
|
+For instance, a database with continuous write access should be synced
+synchronously in order to ensure data integrity. But that does not
+automatically mean that synchronous synchronization is better; it simply is
|
|
+different and there are many cases where asynchronous synchronization is
|
|
+favored over synchronous synchronization. Some pros of asynchronous
|
|
+synchronization are:
|
|
+
|
|
+{\bf 1.}
|
|
+Most asynchronous synchronization tools (including \csync2) are implemented as
|
|
+single-shot commands which need to be executed each time in order to run one
|
|
+synchronization cycle. Therefore it is possible to test changes on one host
|
|
+before deploying them on the others (and also return to the old state if the
|
|
+changes turn out to be bogus).
|
|
+
|
|
+{\bf 2.}
|
|
+The synchronization algorithms are much simpler and thus less error-prone.
|
|
+
|
|
+{\bf 3.}
|
|
+Asynchronous synchronization tools can be (and usually are) implemented as
|
|
+normal user mode programs. Synchronous synchronization tools need to be
|
|
+implemented as operating system extensions. Therefore asynchronous tools are
|
|
+easier to deploy and more portable.
|
|
+
|
|
+{\bf 4.}
|
|
+It is much easier to build systems which allow setups with many hosts and
|
|
+complex replication rules.
|
|
+
|
|
+But most asynchronous synchronization tools are pretty primitive and do not
|
|
+even cover a small portion of the issues found in real world environments.
|
|
+
|
|
+I have developed \csync2 because I found none of the existing tools for
|
|
+asynchronous synchronization satisfying. The development of \csync2 has
|
|
+been sponsored by LINBIT Information Technologies [2], the company which also
|
|
+sponsors the synchronous block device synchronization toolchain DRBD [3].
|
|
+
|
|
+\hspace{0.2cm}
|
|
+
|
|
+Note: I will simply use the term {\it synchronization} instead of the
|
|
+semi-oxymoron {\it asynchronous synchronization} in the rest of this paper.
|
|
+
|
|
+\subsection{\csync2 features}
|
|
+
|
|
+Most synchronization tools are very simple wrappers for remote-copy tools such
|
|
+as {\tt rsync} or {\tt scp}. These solutions work well in most cases but
|
|
+still leave a big gap for more sophisticated tools such as \csync2. The most
|
|
+important features of \csync2 are described in the following sections.
|
|
+
|
|
+\subsubsection{Conflict detection}
|
|
+
|
|
+\label{confl_detect}
|
|
+
|
|
+Most of the trivial synchronization tools just copy the newer file over the
|
|
+older one. This can be a very dangerous behavior if the same file has been
|
|
+changed on more than one host. \csync2 detects such a situation as a conflict
|
|
+and will not synchronize the file. Those conflicts then need to be resolved
|
|
+manually by the cluster administrator.
|
|
+
|
|
+It is not considered a conflict by \csync2 when the same change has been
+performed on two hosts (e.g. because it has already been synchronized with
|
|
+another tool).
|
|
+
|
|
+It is also possible to let \csync2 resolve conflicts automatically for some or
|
|
+all files using one of the pre-defined auto-resolve methods. The available
|
|
+methods are: {\tt none} (the default behavior), {\tt first} (the host on which
|
|
+\csync2 is executed first wins), {\tt younger} and {\tt older} (the younger or
|
|
+older file wins), {\tt bigger} and {\tt smaller} (the bigger or smaller file
|
|
+wins), {\tt left} and {\tt right} (the host on the left side or the right side
|
|
+in the host list wins).
|
|
+
|
|
+The {\tt younger}, {\tt older}, {\tt bigger} and {\tt smaller} methods let the
|
|
+remote side win the conflict if the file has been removed on the local side.
|
|
+
|
|
+\subsubsection{Replicating file removals}
|
|
+
|
|
+Many synchronization tools can not synchronize file removals because they can
|
|
+not distinguish between the file being removed on one host and being created on
|
|
+the other one. So instead of removing the file on the second host they recreate
|
|
+it on the first one.
|
|
+
|
|
+\csync2 detects file removals as such and can synchronize them correctly.
|
|
+
|
|
+\subsubsection{Complex setups}
|
|
+
|
|
+Many synchronization tools are strictly designed for two-host-setups. This is
|
|
+an inadequate restriction and so \csync2 can handle any number of hosts.
|
|
+
|
|
+\csync2 can even handle complex setups where e.g. all hosts in a cluster share
|
|
+the {\tt /etc/hosts} file, but one {\tt /etc/passwd} file is only shared among
|
|
+the members of a small sub-group of hosts and another {\tt /etc/passwd} file is
|
|
+shared among the other hosts in the cluster.
|
|
+
|
|
+\subsubsection{Reacting to updates}
|
|
+
|
|
+In many cases it is not enough to simply synchronize a file between cluster
|
|
+nodes. It also is important to tell the applications using the synchronized
|
|
+file that the underlying file has been changed, e.g. by restarting the
|
|
+application.
|
|
+
|
|
+\csync2 can be configured to execute arbitrary commands when files matching an
|
|
+arbitrary set of shell patterns are synchronized.
|
|
+
|
|
+\section{The \csync2 algorithm}
|
|
+
|
|
+Many other synchronization tools compare the hosts, try to figure out which
|
|
+host is the most up-to-date one and then synchronize the state from this host
|
|
+to all other hosts. This algorithm can not detect conflicts, can not
|
|
+distinguish between file removals and file creations and therefore it is not
+used in \csync2.
|
|
+
|
|
+\csync2 creates a little database with filesystem metadata on each host. This
|
|
+database ({\tt /var/lib/csync2/{\it hostname}.db}) contains a list of the local
|
|
+files under the control of \csync2. The database also contains information such
|
|
+as the file modification timestamps and file sizes.
|
|
+
|
|
+This database is used by \csync2 to detect changes by comparison with the local
|
|
+filesystem. The synchronization itself is then performed using the \csync2
|
|
+protocol (TCP port 30865).
|
|
+
|
|
+Note that this approach implies that \csync2 can only push changes from the
|
|
+machine on which the changes have been performed to the other machines in the
+cluster. Running \csync2 on any other machine in the cluster can not detect and
|
|
+so can not synchronize the changes.
|
|
+
|
|
+Librsync [4] is used for bandwidth-saving file synchronization and SSL is used for
|
|
+encrypting the network traffic. The sqlite library [5] (version 2) is used for
|
|
+managing the \csync2 database files. Authentication is performed using
|
|
+auto-generated pre-shared-keys in combination with the peer IP address and
|
|
+the peer SSL certificate.
|
|
+
|
|
+\section{Setting up \csync2}
|
|
+
|
|
+\subsection{Building \csync2 from source}
|
|
+
|
|
+Simply download the latest \csync2 source tar.gz from {\bf \tt http://oss.linbit.com/csync2/},
|
|
+extract it and run the usual {\tt ./configure} - {\tt make} - {\tt make install} trio.
|
|
+
|
|
+\csync2 has a few prerequisites in addition to a C compiler, the standard
|
|
+system libraries and headers and the usual gnu toolchain ({\tt make}, etc):
|
|
+
|
|
+{\bf 1.} You need librsync, libsqlite (version 2) and libssl installed
|
|
+(including development headers).
|
|
+
|
|
+{\bf 2.} Bison and flex are needed to build the configuration file parser.
|
|
+
|
|
+\subsection{\csync2 in Linux distributions}
|
|
+
|
|
+As of this writing there are no official Debian, RedHat or SuSE packages for
|
|
+\csync2. Gentoo has a \csync2 package, but it has not been updated for a year
+now. As far as I know, ROCK Linux [6] is the only system with an up-to-date
|
|
+\csync2 package. So I recommend that all users of non-ROCK distributions build
+the package from source.
|
|
+
|
|
+The \csync2 source package contains an RPM {\tt .specs} file as well as a {\tt
|
|
+debian/} directory. So it is possible to use {\tt rpmbuild} or {\tt debuild} to
|
|
+build \csync2.
|
|
+
|
|
+\subsection{Post installation}
|
|
+
|
|
+Next you need to create an SSL certificate for the local \csync2 server.
|
|
+Simply running {\tt make cert} in the \csync2 source directory will create and
|
|
+install a self-signed SSL certificate for you. Alternatively, if you have no
|
|
+source around, run the following commands:
|
|
+
|
|
+\begin{verbatim}
|
|
+openssl genrsa \
|
|
+ -out /etc/csync2_ssl_key.pem 1024
|
|
+openssl req -new \
|
|
+ -key /etc/csync2_ssl_key.pem \
|
|
+ -out /etc/csync2_ssl_cert.csr
|
|
+openssl x509 -req -days 600 \
|
|
+ -in /etc/csync2_ssl_cert.csr \
|
|
+ -signkey /etc/csync2_ssl_key.pem \
|
|
+ -out /etc/csync2_ssl_cert.pem
|
|
+\end{verbatim}
|
|
+
|
|
+You have to do that on each host you're running csync2 on. When servers are
|
|
+talking with each other for the first time, they add each other to the database.
|
|
+
|
|
+The \csync2 TCP port 30865 needs to be added to the {\tt /etc/services} file and
|
|
+inetd needs to be told about \csync2 by adding
|
|
+
|
|
+\begin{verbatim}
|
|
+csync2 stream tcp nowait root \
|
|
+ /usr/local/sbin/csync2 csync2 -i
|
|
+\end{verbatim}
|
|
+
|
|
+to {\tt /etc/inetd.conf}.
|
|
+
|
|
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
|
+
|
|
+\begin{figure*}[t]
|
|
+ \begin{center}
|
|
+\begin{verbatim}
|
|
+group mygroup # A synchronization group (see 3.4.1)
|
|
+{
|
|
+ host host1 host2 (host3); # host list (see 3.4.2)
|
|
+ host host4@host4-eth2;
|
|
+
|
|
+ key /etc/csync2.key_mygroup; # pre-shared-key (see 3.4.3)
|
|
+
|
|
+ include /etc/apache; # include/exclude patterns (see 3.4.4)
|
|
+ include %homedir%/bob;
|
|
+ exclude %homedir%/bob/temp;
|
|
+ exclude *~ .*;
|
|
+
|
|
+ action # an action section (see 3.4.5)
|
|
+ {
|
|
+ pattern /etc/apache/httpd.conf;
|
|
+ pattern /etc/apache/sites-available/*;
|
|
+ exec "/usr/sbin/apache2ctl graceful";
|
|
+ logfile "/var/log/csync2_action.log";
|
|
+ do-local;
|
|
+ # do-local-only;
|
|
+ }
|
|
+
|
|
+ backup-directory /var/backups/csync2;
|
|
+ backup-generations 3; # backup old files (see 3.4.11)
|
|
+
|
|
+ auto none; # auto resolving mode (see 3.4.6)
|
|
+}
|
|
+
|
|
+prefix homedir # a prefix declaration (see 3.4.7)
|
|
+{
|
|
+ on host[12]: /export/users;
|
|
+ on *: /home;
|
|
+}
|
|
+\end{verbatim}
|
|
+ \end{center}
|
|
+ \caption{Example \csync2 configuration file}
|
|
+\end{figure*}
|
|
+
|
|
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
|
+
|
|
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
|
+
|
|
+\begin{figure*}[t]
|
|
+ \begin{center}
|
|
+\begin{verbatim}
|
|
+csync2 -cr /
|
|
+if csync2 -M; then
|
|
+ echo "!!"
|
|
+ echo "!! There are unsynced changes! Type 'yes' if you still want to"
|
|
+ echo "!! exit (or press crtl-c) and anything else if you want to start"
|
|
+ echo "!! a new login shell instead."
|
|
+ echo "!!"
|
|
+ if read -p "Do you really want to logout? " in &&
|
|
+ [ ".$in" != ".yes" ]; then
|
|
+ exec bash --login
|
|
+ fi
|
|
+fi
|
|
+\end{verbatim}
|
|
+ \end{center}
|
|
+ \caption{The {\tt csync2\_locheck.sh} script}
|
|
+\end{figure*}
|
|
+
|
|
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
|
+
|
|
+\subsection{Configuration File}
|
|
+
|
|
+Figure 1 shows a simple \csync2 configuration file. The configuration filename
|
|
+is {\tt /etc/csync2.cfg} when no {\tt -C {\it configname}} option has been
|
|
+passed and {\tt /etc/csync2\_{\it configname}.cfg} with a {\tt -C {\it
|
|
+configname}} option.
|
|
+
|
|
+\subsubsection{Synchronization Groups}
|
|
+
|
|
+In the example configuration file you will find the declaration of a
|
|
+synchronization group called {\tt mygroup}. A \csync2 setup can have any number
|
|
+of synchronization groups. Each group has its own list of member hosts and
|
|
+include/exclude rules.
|
|
+
|
|
+\csync2 automatically ignores all groups which do not contain the local
|
|
+hostname in the host list. This way you can use one big \csync2 configuration
|
|
+file for the entire cluster.
|
|
+
|
|
+\subsubsection{Host Lists}
|
|
+
|
|
+Host lists are specified using the {\tt host} keyword. You can either specify
+the hosts in a whitespace-separated list or use an extra {\tt host} statement
+for each host.
|
|
+
|
|
+The hostnames used here must be the local hostnames of the cluster nodes. That
|
|
+means you must use exactly the same string as printed out by the {\tt hostname}
|
|
+command. Otherwise \csync2 would be unable to associate the hostnames in the
+configuration file with the cluster nodes.
|
|
+
|
|
+The {\tt -N {\it hostname}} command line option can be used to set the local
+hostname used by \csync2 to a different value than the one provided by the {\tt
|
|
+hostname} command. This may be e.g. useful for environments where the local
|
|
+hostnames are automatically set by a DHCP server and because of that change
|
|
+often.
|
|
+
|
|
+Sometimes it is desired that a host is receiving \csync2 connections on an IP
|
|
+address which is not the IP address its DNS entry resolves to, e.g.~when a
|
|
+crossover cable is used to directly connect the hosts or an extra
|
|
+synchronization network should be used. In such cases the syntax {\tt{\it
+hostname}@{\it interfacename}} has to be used for the {\tt host} records (see
|
|
+{\tt host4} in the example config file).
|
|
+
|
|
+Sometimes a host shall only receive updates from other hosts in the
|
|
+synchronization group but shall not be allowed to send updates to the other
|
|
+hosts. Such hosts (so-called {\it slave hosts}) must be specified in
|
|
+brackets, such as {\tt host3} in the example config file.
|
|
+
|
|
+\subsubsection{Pre-Shared-Keys}
|
|
+
|
|
+Authentication is performed using the IP addresses and pre-shared-keys in
|
|
+\csync2. Each synchronization group in the config file must have exactly one
|
|
+{\tt key} record specifying the file containing the pre-shared-key for this
|
|
+group. It is recommended to use a separate key for each synchronization group
|
|
+and only place a key file on those hosts which actually are members in the
|
|
+corresponding synchronization group.
|
|
+
|
|
+The key files can be generated with {\tt csync2 -k {\it filename}}.
|
|
+
|
|
+\subsubsection{Include/Exclude Patterns}
|
|
+
|
|
+The {\tt include} and {\tt exclude} patterns are used to specify which files
|
|
+should be synced in the synchronization group.
|
|
+
|
|
+There are two kinds of patterns: pathname patterns which start with a slash
|
|
+character (or a prefix such as the {\tt \%homedir\%} in the example; prefixes
|
|
+are explained in a later section) and basename patterns which do not.
|
|
+
|
|
+The last matching pattern for each of both categories is chosen. If
|
|
+only synchronized if the last match in both categories is an include pattern.
|
|
+
|
|
+The pathname patterns are matched against the beginning of the filename. So they
|
|
+must either match the full absolute filename or must match a directory in the
|
|
+path to the file. The file will not be synchronized if no matching {\tt include} or
|
|
+{\tt exclude} pathname pattern is found (i.e. the default pathname pattern is
|
|
+an exclude pattern).
|
|
+
|
|
+The basename patterns are matched against the base filename without the path. So
|
|
+they can e.g. be used to include or exclude files by their filename extensions.
|
|
+The default basename pattern is an include pattern.
|
|
+
|
|
+In our example config file that means that all files from {\tt /etc/apache} and
|
|
+{\tt \%homedir\%/bob} are synced, except the dot files, files with a tilde
|
|
+character at the end of the filename, and files from {\tt
|
|
+\%homedir\%/bob/temp}.
|
|
+
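+Written out as pattern rules, the example described above corresponds
+roughly to:
+
+\begin{verbatim}
+include /etc/apache;
+include %homedir%/bob;
+exclude %homedir%/bob/temp;
+exclude .*;
+exclude *~;
+\end{verbatim}
+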
|
|
+\subsubsection{Actions}
|
|
+
|
|
+Each synchronization group may have any number of {\tt action} sections. These
|
|
+{\tt action} sections are used to specify shell commands which should be
|
|
+executed after a file is synchronized that matches any of the specified
|
|
+patterns.
|
|
+
|
|
+The {\tt exec} statement is used to specify the command which should be
|
|
+executed. Note that if multiple files matching the pattern are synced in one
|
|
+run, this command will only be executed once. The special token {\tt \%\%} in
|
|
+the command string is substituted with the list of files which triggered the
|
|
+command execution.
|
|
+
|
|
+The output of the command is appended to the specified logfile, or to
|
|
+{\tt /dev/null} if the {\tt logfile} statement is omitted.
|
|
+
|
|
+Usually the action is only triggered on the target hosts, not on the host on
|
|
+which the file modification has been detected in the first place. The {\tt
|
|
+do-local} statement can be used to change this behavior and let \csync2 also
|
|
+execute the command on the host from which the modification originated. You can
|
|
+use {\tt do-local-only} to execute the action only on this machine.
|
|
+
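+A sketch of such an {\tt action} section (pattern, command and logfile are
+only placeholders):
+
+\begin{verbatim}
+action
+{
+    pattern /etc/apache/httpd.conf;
+    exec "/etc/init.d/apache reload";
+    logfile "/var/log/csync2_action.log";
+    do-local;
+}
+\end{verbatim}
+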
|
|
+\subsubsection{Conflict Auto-resolving}
|
|
+
|
|
+The {\tt auto} statement is used to specify the conflict auto-resolving
|
|
+mechanism for this synchronization group. The default value is {\tt auto none}.
|
|
+
|
|
+See section \ref{confl_detect} for a list of possible values for this setting.
|
|
+
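+For example, to always prefer the more recently modified copy of a file
+(assuming {\tt younger} is one of the supported values):
+
+\begin{verbatim}
+auto younger;
+\end{verbatim}
+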
|
|
+\subsubsection{Prefix Declarations}
|
|
+
|
|
+Prefixes (such as the {\tt \%homedir\%} prefix in the example configuration
|
|
+file) can be used to synchronize directories which are named differently on
|
|
+the cluster nodes. In the example configuration file the directory for the
|
|
+user home directories is {\tt /export/users} on the hosts {\tt host1} and
|
|
+{\tt host2} and {\tt /home} on the other hosts.
|
|
+
|
|
+The prefix value must be an absolute path name and must not contain any
|
|
+wildcard characters.
|
|
+
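+A sketch of the {\tt \%homedir\%} declaration matching the description above
+(one {\tt on} line per host):
+
+\begin{verbatim}
+prefix homedir
+{
+    on host1: /export/users;
+    on host2: /export/users;
+    on *:     /home;
+}
+\end{verbatim}
+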
|
|
+\subsubsection{The {\tt nossl} statement}
|
|
+
|
|
+Usually all \csync2 network communication is encrypted using SSL. This can be
|
|
+changed with the {\tt nossl} statement. This statement may only occur in the
|
|
+root context (not in a {\tt group} or {\tt prefix} section) and has two
|
|
+parameters. The first one is a shell pattern matching the source DNS name for
|
|
+the TCP connection and the second one is a shell pattern matching the
|
|
+destination DNS name.
|
|
+
|
|
+So if e.g.~a secure synchronization network is used between some hosts and
|
|
+all the interface DNS names end with {\tt -sync}, a simple
|
|
+
|
|
+\begin{verbatim}
|
|
+nossl *-sync *-sync;
|
|
+\end{verbatim}
|
|
+
|
|
+will disable the encryption overhead on the synchronization network. All other
|
|
+traffic will stay SSL encrypted.
|
|
+
|
|
+\subsubsection{The {\tt config} statement}
|
|
+
|
|
+The {\tt config} statement is nothing more than an include statement and can be
|
|
+used to include other config files. This can be used to modularize the
|
|
+configuration file.
|
|
+
|
|
+\subsubsection{The {\tt ignore} statement}
|
|
+
|
|
+The {\tt ignore} statement can be used to tell \csync2 to not check and not sync
|
|
+the file user-id, the file group-id and/or the file permissions. The statement
|
|
+is only valid in the root context and accepts the parameters {\tt uid}, {\tt
|
|
+gid} and {\tt mod} to turn off handling of user-ids, group-ids and file
|
|
+permissions.
|
|
+
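+For example, to ignore ownership but still synchronize file permissions
+(assuming both parameters may be given in one statement):
+
+\begin{verbatim}
+ignore uid gid;
+\end{verbatim}
+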
|
|
+\subsubsection{The {\tt tempdir} statement}
|
|
+
|
|
+The {\tt tempdir} statement specifies the directory to be used for temporary
|
|
+files while receiving data through librsync. Note that internally, csync2 uses
|
|
+a wrapper around tempnam(3), so the {\tt TMPDIR} environment variable will be
|
|
+considered first, then the directory defined here, and if that does not work out,
|
|
+the system default or {\tt /tmp} will be used.
|
|
+
|
|
+\subsubsection{The {\tt lock-timeout} statement}
|
|
+
|
|
+The {\tt lock-timeout} statement specifies the number of seconds to wait for a database lock
|
|
+before giving up. Default is 12 seconds. The amount will be slightly randomized
|
|
+with a jitter of up to 6 seconds based on the respective process id.
|
|
+
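+As an illustration (values chosen arbitrarily):
+
+\begin{verbatim}
+tempdir /var/tmp;
+lock-timeout 60;
+\end{verbatim}
+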
|
|
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
|
+
|
|
+\begin{figure*}[t]
|
|
+ \begin{center}
|
|
+\begin{verbatim}
|
|
+CREATE TABLE file (
|
|
+ filename, checktxt,
|
|
+ UNIQUE ( filename ) ON CONFLICT REPLACE
|
|
+);
|
|
+
|
|
+CREATE TABLE dirty (
|
|
+ filename, force, myname, peername,
|
|
+ UNIQUE ( filename, peername ) ON CONFLICT IGNORE
|
|
+);
|
|
+
|
|
+CREATE TABLE hint (
|
|
+ filename, recursive,
|
|
+ UNIQUE ( filename, recursive ) ON CONFLICT IGNORE
|
|
+);
|
|
+
|
|
+CREATE TABLE action (
|
|
+ filename, command, logfile,
|
|
+ UNIQUE ( filename, command ) ON CONFLICT IGNORE
|
|
+);
|
|
+
|
|
+CREATE TABLE x509_cert (
|
|
+ peername, certdata,
|
|
+ UNIQUE ( peername ) ON CONFLICT IGNORE
|
|
+);
|
|
+\end{verbatim}
|
|
+ \end{center}
|
|
+ \caption{The \csync2 database schema}
|
|
+\end{figure*}
|
|
+
|
|
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
|
+
|
|
+\subsubsection{Backing up}
|
|
+
|
|
+\csync2 can back up the files it modifies. This may be useful for scenarios
|
|
+where one is afraid of accidentally syncing files in the wrong direction.
|
|
+
|
|
+The {\tt backup-directory} statement is used to tell \csync2 in which directory
|
|
+it should create the backup files and the {\tt backup-generations} statement is
|
|
+used to tell \csync2 how many old versions of the files should be kept in the
|
|
+backup directory.
|
|
+
|
|
+The files in the backup directory are named like the file they back up, with
|
|
+all slashes substituted by underscores and a generation counter appended. Note
|
|
+that only the file content, not metadata such as ownership and permissions,
|
|
+is backed up.
|
|
+
|
|
+By default \csync2 does not back up the files it modifies. The default
|
|
+value for {\tt backup-generations} is {\tt 3}.
|
|
+
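+An example (backup path chosen arbitrarily):
+
+\begin{verbatim}
+backup-directory /var/backups/csync2;
+backup-generations 5;
+\end{verbatim}
+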
|
|
+\subsection{Activating the Logout Check}
|
|
+
|
|
+The \csync2 sources contain a little script called {\tt csync2\_locheck.sh}
|
|
+(Figure 2).
|
|
+
|
|
+If you copy that script into your {\tt \textasciitilde/.bash\_logout} script
|
|
+(or include it using the {\tt source} shell command), the shell will not let
|
|
+you log out if there are any unsynced changes.
|
|
+
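+A minimal sketch of such a check, relying only on the documented exit status
+of {\tt csync2 -M} (which returns 2 if the dirty db is empty), might warn
+like this:
+
+\begin{verbatim}
+#!/bin/sh
+# print a warning on logout if there are unsynced changes
+csync2 -M >/dev/null 2>&1
+if [ $? -ne 2 ]; then
+        echo "WARNING: unsynced csync2 changes, run csync2 -xv!"
+fi
+\end{verbatim}
+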
|
|
+\section{Database Schema}
|
|
+
|
|
+Figure 3 shows the \csync2 database schema. The database can be accessed using
|
|
+the {\tt sqlite} command line shell. All string values are URL encoded in the
|
|
+database.
|
|
+
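+For example, to list the files currently marked as dirty (the database
+filename follows the scheme described in the Multiple Configurations
+section; {\tt myhost} is a placeholder for the local hostname):
+
+\begin{verbatim}
+sqlite /var/lib/csync2/myhost.db \
+    "SELECT filename, peername FROM dirty;"
+\end{verbatim}
+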
|
|
+The {\tt file} table contains a list of all local files under \csync2 control,
|
|
+the {\tt checktxt} attribute contains a special string with information about
|
|
+file type, size, modification time and more. It looks like this:
|
|
+
|
|
+\begin{verbatim}
|
|
+v1:mtime=1103471832:mode=33152:
|
|
+uid=1001:gid=111:type=reg:size=301
|
|
+\end{verbatim}
|
|
+
|
|
+This {\tt checktxt} attribute is used to check if a file has been changed on
|
|
+the local host.
|
|
+
|
|
+If a local change has been detected, the entry in the {\tt file} table is
|
|
+updated and entries in the {\tt dirty} table are created for all peer hosts
|
|
+which should be updated. This way the information that a host should be updated
|
|
+does not get lost, even if the host in question is unreachable right now. The
|
|
+{\tt force} attribute is set to {\tt 0} by default and to {\tt 1} when the
|
|
+cluster administrator marks one side as the right one in a synchronization
|
|
+conflict.
|
|
+
|
|
+The {\tt hint} table is usually not used. In large setups this table can be
|
|
+filled by a daemon listening on the inotify API. It is possible to tell \csync2
|
|
+to not check all files it is responsible for but only those which have entries
|
|
+in the {\tt hint} table. However, a full check via the Linux syscall API is
|
|
+usually fast enough that this only pays off for really huge setups.
|
|
+
|
|
+The {\tt action} table is used for scheduling actions. Usually this table is
|
|
+empty after \csync2 has been terminated. However, it is possible that \csync2
|
|
+gets interrupted in the middle of the synchronization process. In this case
|
|
+the records in the {\tt action} table are processed when \csync2 is executed
|
|
+the next time.
|
|
+
|
|
+The {\tt x509\_cert} table is used to cache the SSL certificates used by the
|
|
+other hosts in the csync2 cluster (like the SSH {\tt known\_hosts} file).
|
|
+
|
|
+\section{Running \csync2}
|
|
+
|
|
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
|
+
|
|
+\begin{figure*}[t]
|
|
+ \begin{center}
|
|
+ \begin{tabular}{|p{0.5\linewidth}|p{0.5\linewidth}|}
|
|
+ \hline
|
|
+\begin{tiny}
|
|
+\begin{verbatim}
|
|
+
|
|
+
|
|
+csync2 1.26 - cluster synchronization tool, 2nd generation
|
|
+LINBIT Information Technologies GmbH <http://www.linbit.com>
|
|
+Copyright (C) 2004, 2005 Clifford Wolf <clifford@clifford.at>
|
|
+This program is free software under the terms of the GNU GPL.
|
|
+
|
|
+Usage: csync2 [-v..] [-C config-name] \
|
|
+ [-D database-dir] [-N hostname] [-p port] ..
|
|
+
|
|
+With file parameters:
|
|
+ -h [-r] file.. Add (recursive) hints for check to db
|
|
+ -c [-r] file.. Check files and maybe add to dirty db
|
|
+ -u [-d] [-r] file.. Updates files if listed in dirty db
|
|
+ -f file.. Force this file in sync (resolve conflict)
|
|
+ -m file.. Mark files in database as dirty
|
|
+
|
|
+Simple mode:
|
|
+ -x [-d] [[-r] file..] Run checks for all given files and update
|
|
+ remote hosts.
|
|
+
|
|
+Without file parameters:
|
|
+ -c Check all hints in db and eventually mark files as dirty
|
|
+ -u [-d] Update (transfer dirty files to peers and mark as clear)
|
|
+
|
|
+ -H List all pending hints from status db
|
|
+ -L List all file-entries from status db
|
|
+ -M List all dirty files from status db
|
|
+
|
|
+ -S myname peername List file-entries from status db for this
|
|
+ synchronization pair.
|
|
+
|
|
+ -T Test if everything is in sync with all peers.
|
|
+
|
|
+ -T filename Test if this file is in sync with all peers.
|
|
+
|
|
+ -T myname peername Test if this synchronization pair is in sync.
|
|
+
|
|
+ -T myname peer file Test only this file in this sync pair.
|
|
+
|
|
+ -TT As -T, but print the unified diffs.
|
|
+
|
|
+ The modes -H, -L, -M and -S return 2 if the requested db is empty.
|
|
+ The mode -T returns 2 if both hosts are in sync.
|
|
+
|
|
+ -i Run in inetd server mode.
|
|
+ -ii Run in stand-alone server mode.
|
|
+ -iii Run in stand-alone server mode (one connect only).
|
|
+
|
|
+ -R Remove files from database which do not match config entries.
|
|
+\end{verbatim}
|
|
+\end{tiny}
|
|
+
|
|
+&
|
|
+
|
|
+\begin{tiny}
|
|
+\begin{verbatim}
|
|
+Modifiers:
|
|
+ -r Recursive operation over subdirectories
|
|
+ -d Dry-run on all remote update operations
|
|
+
|
|
+ -B Do not block everything into big SQL transactions. This
|
|
+ slows down csync2 but allows multiple csync2 processes to
|
|
+ access the database at the same time. Use e.g. when slow
|
|
+ lines are used or huge files are transferred.
|
|
+
|
|
+ -A Open database in asynchronous mode. This will cause data
|
|
+ corruption if the operating system crashes or the computer
|
|
+ loses power.
|
|
+
|
|
+ -I Init-run. Use with care and read the documentation first!
|
|
+ You usually do not need this option unless you are
|
|
+ initializing groups with really large file lists.
|
|
+
|
|
+ -X Also add removals to dirty db when doing a -TI run.
|
|
+ -U Don't mark all other peers as dirty when doing a -TI run.
|
|
+
|
|
+ -G Group1,Group2,Group3,...
|
|
+ Only use this groups from config-file.
|
|
+
|
|
+ -P peer1,peer1,...
|
|
+ Only update this peers (still mark all as dirty).
|
|
+
|
|
+ -F Add new entries to dirty database with force flag set.
|
|
+
|
|
+ -t Print timestamps to debug output (e.g. for profiling).
|
|
+
|
|
+Creating key file:
|
|
+ csync2 -k filename
|
|
+
|
|
+Csync2 will refuse to do anything when a /etc/csync2.lock file is found.
|
|
+\end{verbatim}
|
|
+\end{tiny}
|
|
+ \tabularnewline
|
|
+ \hline
|
|
+ \end{tabular}
|
|
+ \end{center}
|
|
+ \caption{The \csync2 help message}
|
|
+\end{figure*}
|
|
+
|
|
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
|
+
|
|
+Simply calling {\tt csync2} without any additional arguments prints out a
|
|
+help message (Figure 4). A more detailed description of the most
|
|
+important usage scenarios is given in the next sections.
|
|
+
|
|
+\subsection{Just synchronizing the files}
|
|
+
|
|
+The command {\tt csync2 -x} (or {\tt csync2 -xv}) checks for local changes and
|
|
+tries to synchronize them to the other hosts. The option {\tt -d} (dry-run) can
|
|
+be used to do everything but the actual synchronization.
|
|
+
|
|
+When you start \csync2 the first time it compares its empty database with the
|
|
+filesystem and sees that all files have just been created. It will then try
|
|
+to synchronize the files. If a file is not present on a remote host, it
|
|
+will simply be copied there. There is also no problem if the file
|
|
+is already present on the remote host and has the same content. But if the
|
|
+file already exists on the remote host and has a different content, you
|
|
+have your first conflict.
|
|
+
|
|
+\subsection{Resolving a conflict}
|
|
+
|
|
+When two or more hosts in a \csync2 synchronization group have detected changes
|
|
+for the same file, we run into a conflict: \csync2 cannot know which version is
|
|
+the right one (unless an auto-resolving method has been specified in the
|
|
+configuration file). The cluster administrator needs to tell \csync2 which
|
|
+version is the correct one. This can be done using {\tt csync2 -f}, e.g.:
|
|
+
|
|
+\begin{verbatim}
|
|
+# csync2 -x
|
|
+While syncing file /etc/hosts:
|
|
+ERROR from peer apollo:
|
|
+ File is also marked dirty here!
|
|
+Finished with 1 errors.
|
|
+
|
|
+# csync2 -f /etc/hosts
|
|
+# csync2 -xv
|
|
+Connecting to host apollo (PLAIN) ...
|
|
+Updating /etc/hosts on apollo ...
|
|
+Finished with 0 errors.
|
|
+\end{verbatim}
|
|
+
|
|
+\subsection{Checking without syncing}
|
|
+
|
|
+It is also possible to just check the local filesystem without doing any
|
|
+connections to remote hosts: {\tt csync2 -cr /} (the {\tt -r} modifier
|
|
+tells \csync2 to do a recursive check).
|
|
+
|
|
+{\tt csync2 -c} without any additional parameters checks all files listed
|
|
+in the {\tt hint} table.
|
|
+
|
|
+The command {\tt csync2 -M} can be used to print the list of files marked dirty
|
|
+and therefore scheduled for synchronization.
|
|
+
|
|
+\subsection{Comparing the hosts}
|
|
+
|
|
+The {\tt csync2 -T} command can be used to compare the local database with the
|
|
+database of the remote hosts. Note that this command compares the databases and
|
|
+not the filesystems - so make sure that the databases are up-to-date on all
|
|
+hosts before running {\tt csync2 -T} and run {\tt csync2 -cr /} if you are
|
|
+unsure.
|
|
+
|
|
+The output of {\tt csync2 -T} is a table with 4 columns:
|
|
+
|
|
+{\bf 1.} The type of the found difference: {\tt X} means that the file exists
|
|
+on both hosts but is different, {\tt L} that the file is only present on the
|
|
+local host and {\tt R} that the file is only present on the remote host.
|
|
+
|
|
+{\bf 2.} The local interface DNS name (usually just the local hostname).
|
|
+
|
|
+{\bf 3.} The remote interface DNS name (usually just the remote hostname).
|
|
+
|
|
+{\bf 4.} The filename.
|
|
+
|
|
+The {\tt csync2 -TT {\it filename}} command can be used for displaying unified
|
|
+diffs between a local file and the remote hosts.
|
|
+
|
|
+\subsection{Bootstrapping large setups}
|
|
+
|
|
+The {\tt -I} option is a nice tool for bootstrapping larger \csync2
|
|
+installations on slower networks. In such scenarios one usually wants to
|
|
+initially replicate the data using a more efficient way and then use \csync2 to
|
|
+synchronize the changes on a regular basis.
|
|
+
|
|
+The problem here is that when you start \csync2 the first time it detects a lot
|
|
+of newly created files and wants to synchronize them, just to find out that
|
|
+they are already in sync with the peers.
|
|
+
|
|
+The {\tt -I} option modifies the behavior of {\tt -c} so it only updates the
|
|
+{\tt file} table but does not create entries in the {\tt dirty} table. So you
|
|
+can simply use {\tt csync2 -cIr /} to initially create the \csync2 database on
|
|
+the cluster nodes when you know for sure that the hosts are already in sync.
|
|
+
|
|
+The {\tt -I} option may also be used with {\tt -T} to add the detected
|
|
+differences to the dirty table and so induce \csync2 to synchronize the local
|
|
+status of the files in question to the remote host.
|
|
+
|
|
+Usually {\tt -TI} only schedules local files which actually exist to the dirty
|
|
+database. That means that it does not induce \csync2 to remove a file on a
|
|
+remote host if it does not exist on the local host. That behavior can be
|
|
+changed using the {\tt -X} option.
|
|
+
|
|
+The files scheduled to be synced by {\tt -TI} are usually scheduled to be
|
|
+synced to all peers, not just the one peer which has been used in the {\tt -TI}
|
|
+run. This behavior can be changed using the {\tt -U} option.
|
|
+
|
|
+\subsection{Cleaning up the database}
|
|
+
|
|
+It can happen that old data is left over in the \csync2 database after a
|
|
+configuration change (e.g. files and hosts which are no longer referred to
|
|
+by the configuration file). Running {\tt csync2 -R} cleans up such old
|
|
+entries in the \csync2 database.
|
|
+
|
|
+\subsection{Multiple Configurations}
|
|
+
|
|
+Sometimes a higher abstraction level than simply having different
|
|
+synchronization groups is needed. For such cases it is possible to use multiple
|
|
+configuration files (and databases) side by side.
|
|
+
|
|
+The additional configurations must have a unique name. The configuration file
|
|
+is then named {\tt /etc/csync2\_{\it myname}.cfg} and the database is named
|
|
+{\tt /var/lib/csync2/{\it hostname}\_{\it myname}.db}. Accordingly \csync2 must
|
|
+be called with the {\tt -C {\it myname}} option.
|
|
+
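+For example, with a hypothetical configuration named {\tt mail} on the host
+{\tt host1}:
+
+\begin{verbatim}
+# config:   /etc/csync2_mail.cfg
+# database: /var/lib/csync2/host1_mail.db
+csync2 -C mail -x
+\end{verbatim}
+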
|
|
+But there is no need for multiple \csync2 daemons. The \csync2 protocol allows
|
|
+the client to tell the server which configuration should be used for the
|
|
+current TCP connection.
|
|
+
|
|
+\section{Performance}
|
|
+
|
|
+In most cases \csync2 is used for syncing just some (up to a few hundred) system
|
|
+configuration files. In these cases all \csync2 calls are processed in less than
|
|
+one second, even on slow hardware. So a performance analysis is not interesting
|
|
+for these cases but only for setups where a huge amount of files is synced,
|
|
+e.g. when syncing entire application images with \csync2.
|
|
+
|
|
+A well-founded performance analysis which would allow meaningful comparisons
|
|
+with other synchronization tools would be beyond the scope of this paper.
|
|
+So here are just some quick and dirty numbers from a production
|
|
+2-node cluster (2.40GHz dual-Xeon, 7200 RPM ATA HD, 1 GB Ram). The machines
|
|
+had an average load of 0.3 (web and mail) during my tests.
|
|
+
|
|
+I have about 128,000 files (1.7 GB) of Linux kernel sources and object
|
|
+files on an ext3 filesystem under \csync2 control on the machines.
|
|
+
|
|
+Checking for changes ({\tt csync2 -cr /}) took 13.7 seconds wall clock time,
|
|
+9.1 seconds in user mode and 4.1 seconds in kernel mode. The remaining 0.5
|
|
+seconds were spent in other processes.
|
|
+
|
|
+Recreating the local database without adding the files to dirty table ({\tt
|
|
+csync2 -cIr} after removing the database file) took 28.5 seconds (18.6 sec
|
|
+user mode and 2.6 sec kernel mode).
|
|
+
|
|
+Comparing the \csync2 databases of both hosts ({\tt csync2 -T}) took 3 seconds
|
|
+wall clock time.
|
|
+
|
|
+Running {\tt csync2 -u} after adding all 128,000 files took 10 minutes wall
|
|
+clock time. That means that \csync2 tried to sync all 128,000 files and then
|
|
+recognized that the remote side already had the most up-to-date version of
|
|
+the file after comparing the checksums.
|
|
+
|
|
+All numbers are the average values of 10 iterations.
|
|
+
|
|
+\section{Security Notes}
|
|
+
|
|
+As stated earlier, authentication is performed using the peer IP address and a
|
|
+pre-shared-key. The traffic is SSL encrypted and the SSL certificate of the
|
|
+peer is checked when there has already been an SSL connection to that peer in
|
|
+the past (i.e.~the peer certificate is already cached in the database).
|
|
+
|
|
+All DNS names used in the \csync2 configuration file (the {\tt host} records)
|
|
+should be resolvable via the {\tt /etc/hosts} file to guard against DNS
|
|
+spoofing attacks.
|
|
+
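+For example, fixed entries like the following (addresses are placeholders)
+can be added to {\tt /etc/hosts} on all cluster nodes:
+
+\begin{verbatim}
+192.168.10.1    host1
+192.168.10.2    host2
+192.168.10.3    host3
+\end{verbatim}
+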
|
|
+Depending on the list of files being managed by \csync2, an intruder on one of
|
|
+the cluster nodes can also modify the files under \csync2 control on the other
|
|
+cluster nodes and so might also gain access to them. However, an intruder can
|
|
+not modify any other files on the other hosts because \csync2 checks on the
|
|
+receiving side if all updates are OK according to the configuration file.
|
|
+
|
|
+Of course, an intruder would be able to work around these security checks when
|
|
+\csync2 is also used to sync the \csync2 configuration files.
|
|
+
|
|
+\csync2 only syncs the standard UNIX permissions (uid, gid and file mode).
|
|
+ACLs, Linux ext2fs/ext3fs attributes and other extended filesystem permissions
|
|
+are neither synced nor flushed (e.g. if they are set automatically when
|
|
+the file is created).
|
|
+
|
|
+\section{Alternatives}
|
|
+
|
|
+\csync2 is not the only file synchronization tool. Some of the other
|
|
+free software file synchronization tools are:
|
|
+
|
|
+\subsection{Rsync}
|
|
+
|
|
+Rsync [7] is a tool for fast incremental file transfers, but is not a
|
|
+synchronization tool in the context of this paper. Actually \csync2 is
|
|
+using the rsync algorithm for file transfers. A variety of synchronization
|
|
+tools have been written on top of rsync. Most of them are tiny shell scripts.
|
|
+
|
|
+\subsection{Unison}
|
|
+
|
|
+Unison [8] uses an algorithm similar to the one used by \csync2, but is
|
|
+limited to two-host setups. Its focus is on interactive syncs (there even are
|
|
+graphical user interfaces) and it targets syncing home directories
|
|
+between a laptop and a workstation. Unison is pretty intuitive to use, among
|
|
+other things because of its limitations.
|
|
+
|
|
+\subsection{Version Control Systems}
|
|
+
|
|
+Version control systems such as Subversion [9] can also be used to synchronize
|
|
+configuration files or application images. The advantage of version control
|
|
+systems is that they can do three way merges and preserve the entire history
|
|
+of a repository. The disadvantage is that they are much slower and require more
|
|
+disk space than plain synchronization tools.
|
|
+
|
|
+\section{References}
|
|
+
|
|
+{[1]} \csync2 \\
|
|
+http://oss.linbit.com/csync2/
|
|
+\medskip \\
|
|
+{[2]} LINBIT Information Technologies \\
|
|
+http://www.linbit.com/
|
|
+\medskip \\
|
|
+{[3]} DRBD \\
|
|
+http://www.drbd.org/
|
|
+\medskip \\
|
|
+{[4]} Librsync \\
|
|
+http://librsync.sourceforge.net/
|
|
+\medskip \\
|
|
+{[5]} SQLite \\
|
|
+http://www.sqlite.org/
|
|
+\medskip \\
|
|
+{[6]} ROCK Linux \\
|
|
+http://www.rocklinux.org/
|
|
+\medskip \\
|
|
+{[7]} Rsync \\
|
|
+http://samba.anu.edu.au/rsync/
|
|
+\medskip \\
|
|
+{[8]} Unison \\
|
|
+http://www.cis.upenn.edu/\textasciitilde{}bcpierce/unison/
|
|
+\medskip \\
|
|
+{[9]} Subversion \\
|
|
+http://subversion.tigris.org/
|
|
+
|
|
+\end{document}
|
|
diff --git a/error.c b/error.c
|
|
index 26f04b2..82f2f3f 100644
|
|
--- a/error.c
|
|
+++ b/error.c
|
|
@@ -26,6 +26,7 @@
|
|
#include <time.h>
|
|
#include <sys/types.h>
|
|
#include <unistd.h>
|
|
+#include <syslog.h>
|
|
|
|
long csync_last_printtime = 0;
|
|
FILE *csync_timestamp_out = 0;
|
|
@@ -117,20 +118,29 @@ void csync_debug(int lv, const char *fmt, ...)
|
|
{
|
|
va_list ap;
|
|
|
|
- csync_printtime();
|
|
-
|
|
if ( csync_debug_level < lv ) return;
|
|
|
|
- if (csync_timestamps)
|
|
- csync_printtime_prefix();
|
|
+ if (!csync_syslog) {
|
|
+ csync_printtime();
|
|
+
|
|
+ if (csync_timestamps)
|
|
+ csync_printtime_prefix();
|
|
|
|
- if ( csync_server_child_pid )
|
|
- fprintf(csync_debug_out, "<%d> ", csync_server_child_pid);
|
|
-
|
|
- va_start(ap, fmt);
|
|
- vfprintf(csync_debug_out, fmt, ap);
|
|
- va_end(ap);
|
|
+ if ( csync_server_child_pid )
|
|
+ fprintf(csync_debug_out, "<%d> ", csync_server_child_pid);
|
|
|
|
+ va_start(ap, fmt);
|
|
+ vfprintf(csync_debug_out, fmt, ap);
|
|
+ va_end(ap);
|
|
+ // Good / bad with extra line
|
|
+ fprintf(csync_debug_out,"\n");
|
|
+ }
|
|
+ else {
|
|
+ va_start(ap,fmt);
|
|
+ vsyslog(LOG_DEBUG, fmt, ap);
|
|
+ va_end(ap);
|
|
+ }
|
|
csync_messages_printed++;
|
|
}
|
|
|
|
+/* Test 3 */
|
|
diff --git a/getrealfn.c b/getrealfn.c
|
|
index 01d13ce..b2bc0b7 100644
|
|
--- a/getrealfn.c
|
|
+++ b/getrealfn.c
|
|
@@ -53,8 +53,11 @@ char *getrealfn(const char *filename)
|
|
/* make the path absolute */
|
|
if ( *tempfn != '/' ) {
|
|
char *t2, *t1 = my_get_current_dir_name();
|
|
- asprintf(&t2, "%s/%s", t1, tempfn);
|
|
- free(t1); free(tempfn); tempfn = t2;
|
|
+
|
|
+ ASPRINTF(&t2, "%s/%s", t1, tempfn);
|
|
+ free(t1);
|
|
+ free(tempfn);
|
|
+ tempfn = t2;
|
|
}
|
|
|
|
/* remove leading slashes from tempfn */
|
|
@@ -108,7 +111,7 @@ char *getrealfn(const char *filename)
|
|
if ( !chdir(tempfn) ) {
|
|
char *t2, *t1 = my_get_current_dir_name();
|
|
if ( st_mark ) {
|
|
- asprintf(&t2, "%s/%s", t1, st_mark+1);
|
|
+ ASPRINTF(&t2, "%s/%s", t1, st_mark+1);
|
|
free(tempfn); free(t1); tempfn = t2;
|
|
} else {
|
|
free(tempfn); tempfn = t1;
|
|
diff --git a/groups.c b/groups.c
|
|
index 1ff9a1a..511586e 100644
|
|
--- a/groups.c
|
|
+++ b/groups.c
|
|
@@ -41,8 +41,9 @@ int match_pattern_list(
|
|
matched = 1;
|
|
}
|
|
} else {
|
|
+ int fnm_pathname = p->star_matches_slashes ? 0 : FNM_PATHNAME;
|
|
if ( !fnmatch(p->pattern, filename,
|
|
- FNM_LEADING_DIR|FNM_PATHNAME) ) {
|
|
+ FNM_LEADING_DIR|fnm_pathname) ) {
|
|
match_path = p->isinclude;
|
|
matched = 1;
|
|
}
|
|
@@ -91,10 +92,11 @@ int csync_step_into(const char *file)
|
|
continue;
|
|
if ( (p->pattern[0] == '/' || p->pattern[0] == '%') && p->isinclude ) {
|
|
char t[strlen(p->pattern)+1], *l;
|
|
+ int fnm_pathname = p->star_matches_slashes ? 0 : FNM_PATHNAME;
|
|
strcpy(t, p->pattern);
|
|
while ( (l=strrchr(t, '/')) != 0 ) {
|
|
*l = 0;
|
|
- if ( !fnmatch(t, file, FNM_PATHNAME) )
|
|
+ if ( !fnmatch(t, file, fnm_pathname) )
|
|
return 1;
|
|
}
|
|
}
|
|
diff --git a/prefixsubst.c b/prefixsubst.c
|
|
index 6adedd4..d003bb5 100644
|
|
--- a/prefixsubst.c
|
|
+++ b/prefixsubst.c
|
|
@@ -46,7 +46,7 @@ const char *prefixsubst(const char *in)
|
|
ringbuff_counter = (ringbuff_counter+1) % RINGBUFF_LEN;
|
|
if (ringbuff[ringbuff_counter])
|
|
free(ringbuff[ringbuff_counter]);
|
|
- asprintf(&ringbuff[ringbuff_counter], "%s%s", p->path, path);
|
|
+ ASPRINTF(&ringbuff[ringbuff_counter], "%s%s", p->path, path);
|
|
return ringbuff[ringbuff_counter];
|
|
}
|
|
}
|
|
@@ -56,3 +56,35 @@ const char *prefixsubst(const char *in)
|
|
return 0;
|
|
}
|
|
|
|
+const char *prefixencode(const char *filename) {
|
|
+#if __CYGWIN__
|
|
+ if (!strcmp(filename, "/")) {
|
|
+ filename = "/cygdrive";
|
|
+ }
|
|
+#endif
|
|
+ struct csync_prefix *p = csync_prefix;
|
|
+
|
|
+ /*
|
|
+ * Canonicalized paths will always contain /
|
|
+ * Prefixsubsted paths will probably contain %
|
|
+ */
|
|
+ if (*filename == '/')
|
|
+ while (p) {
|
|
+ if (p->path) {
|
|
+ int p_len = strlen(p->path);
|
|
+ int f_len = strlen(filename);
|
|
+
|
|
+ if (p_len <= f_len && !strncmp(p->path, filename, p_len) &&
|
|
+ (filename[p_len] == '/' || !filename[p_len])) {
|
|
+ ringbuff_counter = (ringbuff_counter+1) % RINGBUFF_LEN;
|
|
+ if (ringbuff[ringbuff_counter])
|
|
+ free(ringbuff[ringbuff_counter]);
|
|
+ ASPRINTF(&ringbuff[ringbuff_counter], "%%%s%%%s", p->name, filename+p_len);
|
|
+ return ringbuff[ringbuff_counter];
|
|
+ }
|
|
+ }
|
|
+ p = p->next;
|
|
+ }
|
|
+ return filename;
|
|
+}
|
|
+
|
|
diff --git a/release.sh b/release.sh
|
|
new file mode 100755
|
|
index 0000000..8ee447c
|
|
--- /dev/null
|
|
+++ b/release.sh
|
|
@@ -0,0 +1,69 @@
|
|
+#!/bin/bash
|
|
+#
|
|
+# csync2 - cluster synchronization tool, 2nd generation
|
|
+# LINBIT Information Technologies GmbH <http://www.linbit.com>
|
|
+# Copyright (C) 2004, 2005 Clifford Wolf <clifford@clifford.at>
|
|
+#
|
|
+# This program is free software; you can redistribute it and/or modify
|
|
+# it under the terms of the GNU General Public License as published by
|
|
+# the Free Software Foundation; either version 2 of the License, or
|
|
+# (at your option) any later version.
|
|
+#
|
|
+# This program is distributed in the hope that it will be useful,
|
|
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+# GNU General Public License for more details.
|
|
+#
|
|
+# You should have received a copy of the GNU General Public License
|
|
+# along with this program; if not, write to the Free Software
|
|
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
+#
|
|
+#
|
|
+# Internal script for tagging a release in subversion and creating
|
|
+# the source tar file.
|
|
+
|
|
+PACKAGE=csync2
|
|
+URL=http://svn.linbit.com/csync2
|
|
+
|
|
+case "$1" in
|
|
+ -*)
|
|
+ echo "Usage: $0 newversion"
|
|
+ ;;
|
|
+ '')
|
|
+ svn ls $URL/tags | tr -d / | perl -pe '$x=$_; $x=~s/\n/\t/; print $x;
|
|
+ s/(\d+)/sprintf"%04d",$1/eg;' | sort -k2 | cut -f1
|
|
+ ;;
|
|
+ *)
|
|
+ VERSION=$1
|
|
+ set -ex
|
|
+
|
|
+ date "+csync2 ($VERSION-1) unstable; urgency=low%n%n`
|
|
+ ` * New Upstream Version.%n%n -- Clifford Wolf `
|
|
+ `<clifford.wolf@linbit.com> %a, %d %b %Y `
|
|
+ `%H:%M:%S %z%n" > debian/changelog.new
|
|
+ cat debian/changelog >> debian/changelog.new
|
|
+ mv debian/changelog.new debian/changelog
|
|
+ svn commit -m "Added version $VERSION to debian changelog." \
|
|
+ debian/changelog
|
|
+
|
|
+ svn cp -m "Tagged version $VERSION" \
|
|
+ $URL/trunk $URL/tags/$PACKAGE-$VERSION
|
|
+ svn co $URL/tags/$PACKAGE-$VERSION ../$PACKAGE-$VERSION
|
|
+
|
|
+ cd ../$PACKAGE-$VERSION
|
|
+ svn rm release.sh copycheck.sh
|
|
+ perl -pi -e "s/SNAPSHOT/$VERSION/g" configure.ac
|
|
+ perl -pi -e "s/SNAPSHOT/$VERSION/g" csync2.spec
|
|
+ svn commit -m "Fixed version info in tag $VERSION"
|
|
+
|
|
+ sleep 2
|
|
+ wget -O paper.pdf http://www.clifford.at/papers/2005/csync2/paper.pdf
|
|
+ ./autogen.sh; rm -rf autom4te.cache debian/ $( find -name .svn )
|
|
+
|
|
+ cd ..
|
|
+ tar cvzf $PACKAGE-$VERSION.tar.gz \
|
|
+ --owner=0 --group=0 $PACKAGE-$VERSION
|
|
+ rm -rf $PACKAGE-$VERSION
|
|
+ ;;
|
|
+esac
|
|
+
|
|
diff --git a/rsync.c b/rsync.c
|
|
index e4a918c..86482ee 100644
|
|
--- a/rsync.c
|
|
+++ b/rsync.c
|
|
@@ -25,10 +25,188 @@
|
|
#include <errno.h>
|
|
#include <stdio.h>
|
|
|
|
+/* for tmpfile replacement: */
|
|
+#include <sys/types.h>
|
|
+#include <sys/stat.h>
|
|
+#include <fcntl.h>
|
|
+
|
|
+/* for MAXPATHLEN */
|
|
+#include <sys/param.h>
|
|
+
|
|
+
|
|
#ifdef __CYGWIN__
|
|
#include <w32api/windows.h>
|
|
#endif
|
|
|
|
+
|
|
+/* This has been taken from rsync:lib/compat.c */
|
|
+
|
|
+/**
|
|
+ * Like strncpy but does not 0 fill the buffer and always null
|
|
+ * terminates.
|
|
+ *
|
|
+ * @param bufsize is the size of the destination buffer.
|
|
+ *
|
|
+ * @return index of the terminating byte.
|
|
+ **/
|
|
+static size_t strlcpy(char *d, const char *s, size_t bufsize)
|
|
+{
|
|
+ size_t len = strlen(s);
|
|
+ size_t ret = len;
|
|
+ if (bufsize > 0) {
|
|
+ if (len >= bufsize)
|
|
+ len = bufsize-1;
|
|
+ memcpy(d, s, len);
|
|
+ d[len] = 0;
|
|
+ }
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+
|
|
+/* This has been taken from rsync sources: receiver.c */
|
|
+
|
|
+#define TMPNAME_SUFFIX ".XXXXXX"
|
|
+#define TMPNAME_SUFFIX_LEN ((int)sizeof TMPNAME_SUFFIX - 1)
|
|
+#define MAX_UNIQUE_NUMBER 999999
|
|
+#define MAX_UNIQUE_LOOP 100
|
|
+
|
|
+/* get_tmpname() - create a tmp filename for a given filename
|
|
+ *
|
|
+ * If a tmpdir is defined, use that as the directory to put it in. Otherwise,
|
|
+ * the tmp filename is in the same directory as the given name. Note that
|
|
+ * there may be no directory at all in the given name!
|
|
+ *
|
|
+ * The tmp filename is basically the given filename with a dot prepended, and
|
|
+ * .XXXXXX appended (for mkstemp() to put its unique gunk in). We take care
|
|
+ * to not exceed either the MAXPATHLEN or NAME_MAX, especially the last, as
|
|
+ * the basename basically becomes 8 characters longer. In such a case, the
|
|
+ * original name is shortened sufficiently to make it all fit.
|
|
+ *
|
|
+ * If the make_unique arg is True, the XXXXXX string is replaced with a unique
|
|
+ * string that doesn't exist at the time of the check. This is intended to be
|
|
+ * used for creating hard links, symlinks, devices, and special files, since
|
|
+ * normal files should be handled by mkstemp() for safety.
|
|
+ *
|
|
+ * Of course, the only reason the file is based on the original name is to
|
|
+ * make it easier to figure out what purpose a temp file is serving when a
|
|
+ * transfer is in progress. */
|
|
+
|
|
+static int get_tmpname(char *fnametmp, const char *fname)
|
|
+{
|
|
+ int maxname, added, length = 0;
|
|
+ const char *f;
|
|
+ char *suf;
|
|
+
|
|
+ static unsigned counter_limit;
|
|
+ unsigned counter;
|
|
+
|
|
+ if ((f = strrchr(fname, '/')) != NULL) {
|
|
+ ++f;
|
|
+ length = f - fname;
|
|
+ /* copy up to and including the slash */
|
|
+ strlcpy(fnametmp, fname, length + 1);
|
|
+ } else
|
|
+ f = fname;
|
|
+ fnametmp[length++] = '.';
|
|
+
|
|
+ /* The maxname value is bufsize, and includes space for the '\0'.
|
|
+ * NAME_MAX needs an extra -1 for the name's leading dot. */
|
|
+ maxname = MIN(MAXPATHLEN - length - TMPNAME_SUFFIX_LEN,
|
|
+ NAME_MAX - 1 - TMPNAME_SUFFIX_LEN);
|
|
+
|
|
+ if (maxname < 1) {
|
|
+ csync_debug(1, "temporary filename too long: %s\n", fname);
|
|
+ fnametmp[0] = '\0';
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ added = strlcpy(fnametmp + length, f, maxname);
|
|
+ if (added >= maxname)
|
|
+ added = maxname - 1;
|
|
+ suf = fnametmp + length + added;
|
|
+
|
|
+ if (!counter_limit) {
|
|
+ counter_limit = (unsigned)getpid() + MAX_UNIQUE_LOOP;
|
|
+ if (counter_limit > MAX_UNIQUE_NUMBER || counter_limit < MAX_UNIQUE_LOOP)
|
|
+ counter_limit = MAX_UNIQUE_LOOP;
|
|
+
|
|
+ counter = counter_limit - MAX_UNIQUE_LOOP;
|
|
+
|
|
+ /* This doesn't have to be very good because we don't need
|
|
+ * to worry about someone trying to guess the values: all
|
|
+ * a conflict will do is cause a device, special file, hard
|
|
+ * link, or symlink to fail to be created. Also: avoid
|
|
+ * using mktemp() due to gcc's annoying warning. */
|
|
+ while (1) {
|
|
+ snprintf(suf, TMPNAME_SUFFIX_LEN+1, ".%d", counter);
|
|
+ if (access(fnametmp, 0) < 0)
|
|
+ break;
|
|
+ if (++counter >= counter_limit)
|
|
+ return 0;
|
|
+ }
|
|
+ } else
|
|
+ memcpy(suf, TMPNAME_SUFFIX, TMPNAME_SUFFIX_LEN+1);
|
|
+
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+
|
|
+/* Returns open file handle for a temp file that resides in the
|
|
+ same directory as file fname. The file must be removed after
|
|
+ usage.
|
|
+*/
|
|
+
|
|
+static FILE *open_temp_file(char *fnametmp, const char *fname)
|
|
+{
|
|
+ FILE *f;
|
|
+ int fd;
|
|
+
|
|
+ if (get_tmpname(fnametmp, fname) == 0) {
|
|
+ csync_debug(1, "ERROR: Couldn't find tempname for file %s\n", fname);
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ f = NULL;
|
|
+ fd = open(fnametmp, O_CREAT | O_EXCL | O_RDWR, S_IWUSR | S_IRUSR);
|
|
+ if (fd >= 0) {
|
|
+ f = fdopen(fd, "wb+");
|
|
+ /* not unlinking since rename wouldn't work then */
|
|
+ }
|
|
+ if (fd < 0 || !f) {
|
|
+ csync_debug(1, "ERROR: Could not open result from tempnam(%s)!\n", fnametmp);
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ return f;
|
|
+}
|
|
+
|
|
+
|
|
+
|
|
+#ifdef _SVID_SOURCE
|
|
+static FILE *paranoid_tmpfile()
|
|
+{
|
|
+ char *name;
|
|
+ FILE *f;
|
|
+ int fd;
|
|
+
|
|
+ name = tempnam(csync_tempdir, "csync2");
|
|
+ if (!name)
|
|
+ csync_fatal("ERROR: tempnam() didn't return a valid filename!\n");
|
|
+
|
|
+ f = NULL;
|
|
+ fd = open(name, O_CREAT | O_EXCL | O_RDWR, S_IWUSR | S_IRUSR);
|
|
+ if (fd >= 0) {
|
|
+ f = fdopen(fd, "wb+");
|
|
+ unlink(name);
|
|
+ }
|
|
+ if (fd < 0 || !f)
|
|
+ csync_fatal("ERROR: Could not open result from tempnam(%s)!\n", name);
|
|
+
|
|
+ csync_debug(3, "Tempfilename is %s\n", name);
|
|
+ free(name);
|
|
+ return f;
|
|
+}
|
|
+#else
|
|
static FILE *paranoid_tmpfile()
|
|
{
|
|
FILE *f;
|
|
@@ -41,6 +219,7 @@ static FILE *paranoid_tmpfile()
|
|
|
|
return f;
|
|
}
|
|
+#endif
|
|
|
|
void csync_send_file(FILE *in)
|
|
{
|
|
@@ -119,18 +298,23 @@ int csync_rs_check(const char *filename, int isreg)
|
|
rs_stats_t stats;
|
|
rs_result result;
|
|
long size;
|
|
+ char tmpfname[MAXPATHLEN];
|
|
|
|
csync_debug(3, "Csync2 / Librsync: csync_rs_check('%s', %d [%s])\n",
|
|
filename, isreg, isreg ? "regular file" : "non-regular file");
|
|
|
|
csync_debug(3, "Opening basis_file and sig_file..\n");
|
|
|
|
- sig_file = paranoid_tmpfile();
|
|
+ sig_file = open_temp_file(tmpfname, prefixsubst(filename));
|
|
if ( !sig_file ) goto io_error;
|
|
+ if (unlink(tmpfname) < 0) goto io_error;
|
|
|
|
basis_file = fopen(prefixsubst(filename), "rb");
|
|
- if ( !basis_file ) basis_file = paranoid_tmpfile();
|
|
- if ( !basis_file ) goto io_error;
|
|
+ if ( !basis_file ) { /* ?? why a tmp file? */
|
|
+ basis_file = open_temp_file(tmpfname, prefixsubst(filename));
|
|
+ if ( !basis_file ) goto io_error;
|
|
+ if (unlink(tmpfname) < 0) goto io_error;
|
|
+ }
|
|
|
|
if ( isreg ) {
|
|
csync_debug(3, "Running rs_sig_file() from librsync....\n");
|
|
@@ -204,14 +388,19 @@ error:;
|
|
|
|
void csync_rs_sig(const char *filename)
|
|
{
|
|
- FILE *basis_file, *sig_file;
|
|
+ FILE *basis_file = 0, *sig_file = 0;
|
|
rs_stats_t stats;
|
|
rs_result result;
|
|
+ char tmpfname[MAXPATHLEN];
|
|
|
|
csync_debug(3, "Csync2 / Librsync: csync_rs_sig('%s')\n", filename);
|
|
|
|
csync_debug(3, "Opening basis_file and sig_file..\n");
|
|
- sig_file = paranoid_tmpfile();
|
|
+
|
|
+ sig_file = open_temp_file(tmpfname, prefixsubst(filename));
|
|
+ if ( !sig_file ) goto io_error;
|
|
+ if (unlink(tmpfname) < 0) goto io_error;
|
|
+
|
|
basis_file = fopen(prefixsubst(filename), "rb");
|
|
if ( !basis_file ) basis_file = fopen("/dev/null", "rb");
|
|
|
|
@@ -227,19 +416,34 @@ void csync_rs_sig(const char *filename)
|
|
csync_debug(3, "Signature has been created successfully.\n");
|
|
fclose(basis_file);
|
|
fclose(sig_file);
|
|
+
|
|
+ return;
|
|
+
|
|
+io_error:
|
|
+ csync_debug(0, "I/O Error '%s' in rsync-sig: %s\n",
|
|
+ strerror(errno), prefixsubst(filename));
|
|
+
|
|
+ if (basis_file) fclose(basis_file);
|
|
+ if (sig_file) fclose(sig_file);
|
|
}
|
|
|
|
+
|
|
+
|
|
int csync_rs_delta(const char *filename)
|
|
{
|
|
- FILE *sig_file, *new_file, *delta_file;
|
|
+ FILE *sig_file = 0, *new_file = 0, *delta_file = 0;
|
|
rs_result result;
|
|
rs_signature_t *sumset;
|
|
rs_stats_t stats;
|
|
+ char tmpfname[MAXPATHLEN];
|
|
|
|
csync_debug(3, "Csync2 / Librsync: csync_rs_delta('%s')\n", filename);
|
|
|
|
csync_debug(3, "Receiving sig_file from peer..\n");
|
|
- sig_file = paranoid_tmpfile();
|
|
+ sig_file = open_temp_file(tmpfname, prefixsubst(filename));
|
|
+ if ( !sig_file ) goto io_error;
|
|
+ if (unlink(tmpfname) < 0) goto io_error;
|
|
+
|
|
if ( csync_recv_file(sig_file) ) {
|
|
fclose(sig_file);
|
|
return -1;
|
|
@@ -260,7 +464,10 @@ int csync_rs_delta(const char *filename)
|
|
errno = backup_errno;
|
|
return -1;
|
|
}
|
|
- delta_file = paranoid_tmpfile();
|
|
+
|
|
+ delta_file = open_temp_file(tmpfname, prefixsubst(filename));
|
|
+ if ( !delta_file ) goto io_error;
|
|
+ if (unlink(tmpfname) < 0) goto io_error;
|
|
|
|
csync_debug(3, "Running rs_build_hash_table() from librsync..\n");
|
|
result = rs_build_hash_table(sumset);
|
|
@@ -281,6 +488,16 @@ int csync_rs_delta(const char *filename)
|
|
fclose(new_file);
|
|
|
|
return 0;
|
|
+
|
|
+io_error:
|
|
+ csync_debug(0, "I/O Error '%s' in rsync-delta: %s\n",
|
|
+ strerror(errno), prefixsubst(filename));
|
|
+
|
|
+ if (new_file) fclose(new_file);
|
|
+ if (delta_file) fclose(delta_file);
|
|
+ if (sig_file) fclose(sig_file);
|
|
+
|
|
+ return -1;
|
|
}
|
|
|
|
int csync_rs_patch(const char *filename)
|
|
@@ -289,24 +506,27 @@ int csync_rs_patch(const char *filename)
|
|
int backup_errno;
|
|
rs_stats_t stats;
|
|
rs_result result;
|
|
- char buffer[512];
|
|
char *errstr = "?";
|
|
- int rc;
|
|
+ char tmpfname[MAXPATHLEN], newfname[MAXPATHLEN];
|
|
|
|
csync_debug(3, "Csync2 / Librsync: csync_rs_patch('%s')\n", filename);
|
|
|
|
csync_debug(3, "Receiving delta_file from peer..\n");
|
|
- delta_file = paranoid_tmpfile();
|
|
+ delta_file = open_temp_file(tmpfname, prefixsubst(filename));
|
|
if ( !delta_file ) { errstr="creating delta temp file"; goto io_error; }
|
|
+ if (unlink(tmpfname) < 0) { errstr="removing delta temp file"; goto io_error; }
|
|
if ( csync_recv_file(delta_file) ) goto error;
|
|
|
|
csync_debug(3, "Opening to be patched file on local host..\n");
|
|
basis_file = fopen(prefixsubst(filename), "rb");
|
|
- if ( !basis_file ) basis_file = paranoid_tmpfile();
|
|
- if ( !basis_file ) { errstr="opening data file for reading"; goto io_error; }
|
|
+ if ( !basis_file ) {
|
|
+ basis_file = open_temp_file(tmpfname, prefixsubst(filename));
|
|
+ if ( !basis_file ) { errstr="opening data file for reading"; goto io_error; }
|
|
+ if (unlink(tmpfname) < 0) { errstr="removing data temp file"; goto io_error; }
|
|
+ }
|
|
|
|
csync_debug(3, "Opening temp file for new data on local host..\n");
|
|
- new_file = paranoid_tmpfile();
|
|
+ new_file = open_temp_file(newfname, prefixsubst(filename));
|
|
if ( !new_file ) { errstr="creating new data temp file"; goto io_error; }
|
|
|
|
csync_debug(3, "Running rs_patch_file() from librsync..\n");
|
|
@@ -316,12 +536,12 @@ int csync_rs_patch(const char *filename)
|
|
goto error;
|
|
}
|
|
|
|
- csync_debug(3, "Copying new data to local file..\n");
|
|
+ csync_debug(3, "Renaming tmp file to data file..\n");
|
|
fclose(basis_file);
|
|
- rewind(new_file);
|
|
- unlink(prefixsubst(filename));
|
|
|
|
#ifdef __CYGWIN__
|
|
+
|
|
+/* TODO: needed? */
|
|
// This creates the file using the native windows API, bypassing
|
|
// the cygwin wrappers and so making sure that we do not mess up the
|
|
// permissions..
|
|
@@ -350,14 +570,9 @@ int csync_rs_patch(const char *filename)
|
|
}
|
|
#endif
|
|
|
|
- basis_file = fopen(prefixsubst(filename), "wb");
|
|
- if ( !basis_file ) { errstr="opening data file for writing"; goto io_error; }
|
|
-
|
|
- while ( (rc = fread(buffer, 1, 512, new_file)) > 0 )
|
|
- fwrite(buffer, rc, 1, basis_file);
|
|
+ if (rename(newfname, prefixsubst(filename)) < 0) { errstr="renaming tmp file to to be patched file"; goto io_error; }
|
|
|
|
csync_debug(3, "File has been patched successfully.\n");
|
|
- fclose(basis_file);
|
|
fclose(delta_file);
|
|
fclose(new_file);
|
|
|
|
diff --git a/update.c b/update.c
|
|
index 7c55113..b2c2b85 100644
|
|
--- a/update.c
|
|
+++ b/update.c
|
|
@@ -44,7 +44,9 @@ int read_conn_status(const char *file, const char *host)
|
|
}
|
|
if ( file )
|
|
csync_debug(0, "While syncing file %s:\n", file);
|
|
- csync_debug(0, "ERROR from peer %s: %s", host, line);
|
|
+ else
|
|
+ file = "<no file>";
|
|
+ csync_debug(0, "ERROR from peer(%s): %s %s", file, host, line);
|
|
csync_error_count++;
|
|
return !strcmp(line, "File is also marked dirty here!") ? 1 : 2;
|
|
}
|
|
@@ -70,7 +72,7 @@ int connect_to_host(const char *peername)
|
|
if ( conn_open(peername) ) return -1;
|
|
|
|
if ( use_ssl ) {
|
|
-#if HAVE_LIBGNUTLS_OPENSSL
|
|
+#if HAVE_LIBGNUTLS
|
|
conn_printf("SSL\n");
|
|
if ( read_conn_status(0, peername) ) {
|
|
csync_debug(1, "SSL command failed.\n");
|
|
@@ -196,7 +198,8 @@ auto_resolve_entry_point:
|
|
csync_debug(1, "File is already up to date on peer.\n");
|
|
if ( dry_run ) {
|
|
printf("?S: %-15s %s\n", peername, filename);
|
|
- return;
|
|
+ // DS Remove local dirty, even in dry run
|
|
+ // return;
|
|
}
|
|
goto skip_action;
|
|
}
|
|
@@ -332,7 +335,8 @@ auto_resolve_entry_point:
|
|
csync_debug(1, "File is already up to date on peer.\n");
|
|
if ( dry_run ) {
|
|
printf("?S: %-15s %s\n", peername, filename);
|
|
- return;
|
|
+ // DS also skip on dry_run
|
|
+ // return;
|
|
}
|
|
goto skip_action;
|
|
}
|
|
@@ -540,17 +544,16 @@ void csync_update_host(const char *peername,
|
|
struct textlist *tl_mod = 0, **last_tn=&tl;
|
|
char *current_name = 0;
|
|
struct stat st;
|
|
-
|
|
SQL_BEGIN("Get files for host from dirty table",
|
|
- "SELECT filename, myname, force FROM dirty WHERE peername = '%s' "
|
|
+ "SELECT filename, myname, forced FROM dirty WHERE peername = '%s' "
|
|
"ORDER by filename ASC", url_encode(peername))
|
|
{
|
|
- const char *filename = url_decode(SQL_V[0]);
|
|
+ const char *filename = url_decode(SQL_V(0));
|
|
int i, use_this = patnum == 0;
|
|
for (i=0; i<patnum && !use_this; i++)
|
|
if ( compare_files(filename, patlist[i], recursive) ) use_this = 1;
|
|
if (use_this)
|
|
- textlist_add2(&tl, filename, url_decode(SQL_V[1]), atoi(SQL_V[2]));
|
|
+ textlist_add2(&tl, filename, url_decode(SQL_V(1)), atoi(SQL_V(2)));
|
|
} SQL_END;
|
|
|
|
/* just return if there are no files to update */
|
|
@@ -558,7 +561,7 @@ void csync_update_host(const char *peername,
|
|
|
|
if ( connect_to_host(peername) ) {
|
|
csync_error_count++;
|
|
- csync_debug(0, "ERROR: Connection to remote host failed.\n");
|
|
+ csync_debug(0, "ERROR: Connection to remote host `%s' failed.\n", peername);
|
|
csync_debug(1, "Host stays in dirty state. "
|
|
"Try again later...\n");
|
|
return;
|
|
@@ -584,6 +587,7 @@ void csync_update_host(const char *peername,
|
|
t->next = tl_mod;
|
|
tl_mod = t;
|
|
} else {
|
|
+ csync_debug(3, "Dirty item %s %s %d \n", t->value, t->value2, t->intvalue);
|
|
if ( !current_name || strcmp(current_name, t->value2) ) {
|
|
conn_printf("HELLO %s\n", url_encode(t->value2));
|
|
if ( read_conn_status(t->value, peername) )
|
|
@@ -600,6 +604,7 @@ ident_failed_1:
|
|
|
|
for (t = tl_mod; t != 0; t = t->next) {
|
|
if ( !current_name || strcmp(current_name, t->value2) ) {
|
|
+ csync_debug(3, "Dirty item %s %s %d ", t->value, t->value2, t->intvalue);
|
|
conn_printf("HELLO %s\n", url_encode(t->value2));
|
|
if ( read_conn_status(t->value, peername) )
|
|
goto ident_failed_2;
|
|
@@ -624,9 +629,9 @@ void csync_update(const char ** patlist, int patnum, int recursive, int dry_run)
|
|
struct textlist *tl = 0, *t;
|
|
|
|
SQL_BEGIN("Get hosts from dirty table",
|
|
- "SELECT peername FROM dirty GROUP BY peername ORDER BY random()")
|
|
+ "SELECT peername FROM dirty GROUP BY peername")
|
|
{
|
|
- textlist_add(&tl, url_decode(SQL_V[0]), 0);
|
|
+ textlist_add(&tl, url_decode(SQL_V(0)), 0);
|
|
} SQL_END;
|
|
|
|
for (t = tl; t != 0; t = t->next) {
|
|
@@ -672,7 +677,7 @@ found_host_check:
|
|
|
|
if ( connect_to_host(peername) ) {
|
|
csync_error_count++;
|
|
- csync_debug(0, "ERROR: Connection to remote host failed.\n");
|
|
+ csync_debug(0, "ERROR: Connection to remote host `%s' failed.\n", peername);
|
|
return 0;
|
|
}
|
|
|
|
@@ -774,7 +779,7 @@ found_host_check:
|
|
|
|
if ( connect_to_host(peername) ) {
|
|
csync_error_count++;
|
|
- csync_debug(0, "ERROR: Connection to remote host failed.\n");
|
|
+ csync_debug(0, "ERROR: Connection to remote host `%s' failed.\n", peername);
|
|
return 0;
|
|
}
|
|
|
|
@@ -798,7 +803,7 @@ found_host:
|
|
filename ? url_encode(filename) : "",
|
|
filename ? "'" : "")
|
|
{
|
|
- char *l_file = strdup(url_decode(SQL_V[1])), *l_checktxt = strdup(url_decode(SQL_V[0]));
|
|
+ char *l_file = strdup(url_decode(SQL_V(1))), *l_checktxt = strdup(url_decode(SQL_V(0)));
|
|
if ( csync_match_file_host(l_file, myname, peername, 0) ) {
|
|
if ( remote_eof ) {
|
|
got_remote_eof:
|
|
@@ -936,17 +941,17 @@ void csync_remove_old()
|
|
const struct csync_group *g = 0;
|
|
const struct csync_group_host *h;
|
|
|
|
- const char *filename = url_decode(SQL_V[0]);
|
|
+ const char *filename = url_decode(SQL_V(0));
|
|
|
|
while ((g=csync_find_next(g, filename)) != 0) {
|
|
- if (!strcmp(g->myname, SQL_V[1]))
|
|
+ if (!strcmp(g->myname, SQL_V(1)))
|
|
for (h = g->host; h; h = h->next) {
|
|
- if (!strcmp(h->hostname, SQL_V[2]))
|
|
+ if (!strcmp(h->hostname, SQL_V(2)))
|
|
goto this_dirty_record_is_ok;
|
|
}
|
|
}
|
|
|
|
- textlist_add2(&tl, SQL_V[0], SQL_V[2], 0);
|
|
+ textlist_add2(&tl, SQL_V(0), SQL_V(2), 0);
|
|
|
|
this_dirty_record_is_ok:
|
|
;
|
|
@@ -962,8 +967,8 @@ this_dirty_record_is_ok:
|
|
SQL_BEGIN("Query file DB",
|
|
"SELECT filename FROM file")
|
|
{
|
|
- if (!csync_find_next(0, url_decode(SQL_V[0])))
|
|
- textlist_add(&tl, SQL_V[0], 0);
|
|
+ if (!csync_find_next(0, url_decode(SQL_V(0))))
|
|
+ textlist_add(&tl, SQL_V(0), 0);
|
|
} SQL_END;
|
|
for (t = tl; t != 0; t = t->next) {
|
|
csync_debug(1, "Removing %s from file db.\n", t->value);
|