From 884f5414c2e07fe310d1f8623c7cbe857bf1ffe3 Mon Sep 17 00:00:00 2001 From: Mario Fetka Date: Fri, 24 Mar 2017 11:42:13 +0100 Subject: [PATCH] Imported Upstream version 0.9.1 --- CMakeLists.txt | 71 + README | 48 + binlog.c | 284 +++ binlog.h | 30 + cmake/modules/CppcheckTargets.cmake | 214 ++ cmake/modules/FindGLIB2.cmake | 22 + cmake/modules/FindMySQL.cmake | 111 + cmake/modules/FindPCRE.cmake | 45 + cmake/modules/FindSphinx.cmake | 57 + cmake/modules/Findcppcheck.cmake | 142 ++ cmake/modules/Findcppcheck.cpp | 16 + common.h | 45 + config.h.in | 7 + docs/CMakeLists.txt | 156 ++ docs/_build/conf.py.in | 218 ++ docs/_build/sources.cmake.in | 16 + docs/authors.rst | 9 + docs/compiling.rst | 70 + docs/examples.rst | 37 + docs/files.rst | 61 + docs/index.rst | 25 + docs/mydumper_usage.rst | 193 ++ docs/myloader_usage.rst | 99 + g_unix_signal.c | 128 ++ g_unix_signal.h | 10 + mydumper.c | 2890 +++++++++++++++++++++++++++ mydumper.h | 100 + myloader.c | 577 ++++++ myloader.h | 51 + server_detect.c | 71 + server_detect.h | 28 + 31 files changed, 5831 insertions(+) create mode 100644 CMakeLists.txt create mode 100644 README create mode 100644 binlog.c create mode 100644 binlog.h create mode 100644 cmake/modules/CppcheckTargets.cmake create mode 100644 cmake/modules/FindGLIB2.cmake create mode 100644 cmake/modules/FindMySQL.cmake create mode 100644 cmake/modules/FindPCRE.cmake create mode 100644 cmake/modules/FindSphinx.cmake create mode 100644 cmake/modules/Findcppcheck.cmake create mode 100644 cmake/modules/Findcppcheck.cpp create mode 100644 common.h create mode 100644 config.h.in create mode 100644 docs/CMakeLists.txt create mode 100644 docs/_build/conf.py.in create mode 100644 docs/_build/sources.cmake.in create mode 100644 docs/authors.rst create mode 100644 docs/compiling.rst create mode 100644 docs/examples.rst create mode 100644 docs/files.rst create mode 100644 docs/index.rst create mode 100644 docs/mydumper_usage.rst create mode 100644 docs/myloader_usage.rst 
create mode 100644 g_unix_signal.c create mode 100644 g_unix_signal.h create mode 100644 mydumper.c create mode 100644 mydumper.h create mode 100644 myloader.c create mode 100644 myloader.h create mode 100644 server_detect.c create mode 100644 server_detect.h diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 0000000..5da3828 --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,71 @@ +cmake_minimum_required(VERSION 2.6) +project(mydumper) +set(VERSION 0.9.1) +set(ARCHIVE_NAME "${CMAKE_PROJECT_NAME}-${VERSION}") + +#Required packages +set(CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake/modules) +find_package(MySQL) +find_package(ZLIB) +find_package(GLIB2) +find_package(PCRE) + +option(BUILD_DOCS "Build the documentation" ON) + +if (BUILD_DOCS) + add_subdirectory(docs) +endif (BUILD_DOCS) + +option(WITH_BINLOG "Build binlog dump options" OFF) + +set(CMAKE_C_FLAGS "-Wall -Wno-deprecated-declarations -Wunused -Wwrite-strings -Wno-strict-aliasing -Wextra -Wshadow -Werror -O3 -g ${MYSQL_CFLAGS}") + +include_directories(${MYDUMPER_SOURCE_DIR} ${MYSQL_INCLUDE_DIR} ${GLIB2_INCLUDE_DIR} ${PCRE_INCLUDE_DIR} ${ZLIB_INCLUDE_DIRS}) + +if (NOT CMAKE_INSTALL_PREFIX) + SET(CMAKE_INSTALL_PREFIX "/usr/local" CACHE STRING "Install path" FORCE) +endif (NOT CMAKE_INSTALL_PREFIX) +MARK_AS_ADVANCED(CMAKE) + +CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/config.h.in ${CMAKE_CURRENT_SOURCE_DIR}/config.h) + + +if (WITH_BINLOG) + add_executable(mydumper mydumper.c binlog.c server_detect.c g_unix_signal.c) +else (WITH_BINLOG) + add_executable(mydumper mydumper.c server_detect.c g_unix_signal.c) +endif (WITH_BINLOG) +target_link_libraries(mydumper ${MYSQL_LIBRARIES} ${GLIB2_LIBRARIES} ${GTHREAD2_LIBRARIES} ${PCRE_PCRE_LIBRARY} ${ZLIB_LIBRARIES}) + + +add_executable(myloader myloader.c) +target_link_libraries(myloader ${MYSQL_LIBRARIES} ${GLIB2_LIBRARIES} ${GTHREAD2_LIBRARIES} ${PCRE_PCRE_LIBRARY} ${ZLIB_LIBRARIES}) + +INSTALL(TARGETS mydumper myloader + RUNTIME DESTINATION bin +) + 
+add_custom_target(dist + COMMAND bzr export --root=${ARCHIVE_NAME} + ${CMAKE_BINARY_DIR}/${ARCHIVE_NAME}.tar.gz + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}) + +OPTION(RUN_CPPCHECK "Run cppcheck" OFF) + +IF(RUN_CPPCHECK) + include(CppcheckTargets) + add_cppcheck(mydumper) + add_cppcheck(myloader) +ENDIF(RUN_CPPCHECK) + + +MESSAGE(STATUS "------------------------------------------------") +MESSAGE(STATUS "MYSQL_CONFIG = ${MYSQL_CONFIG}") +MESSAGE(STATUS "CMAKE_INSTALL_PREFIX = ${CMAKE_INSTALL_PREFIX}") +MESSAGE(STATUS "BUILD_DOCS = ${BUILD_DOCS}") +MESSAGE(STATUS "WITH_BINLOG = ${WITH_BINLOG}") +MESSAGE(STATUS "RUN_CPPCHECK = ${RUN_CPPCHECK}") +MESSAGE(STATUS "Change a values with: cmake -D=") +MESSAGE(STATUS "------------------------------------------------") +MESSAGE(STATUS) + diff --git a/README b/README new file mode 100644 index 0000000..38104c2 --- /dev/null +++ b/README @@ -0,0 +1,48 @@ +== What is mydumper? Why? == + +* Parallelism (hence, speed) and performance (avoids expensive character set conversion routines, efficient code overall) +* Easier to manage output (separate files for tables, dump metadata, etc, easy to view/parse data) +* Consistency - maintains snapshot across all threads, provides accurate master and slave log positions, etc +* Manageability - supports PCRE for specifying database and tables inclusions and exclusions + +== How to build it? == + +Run: + cmake . + make + +One needs to install development versions of required libaries (MySQL, GLib, ZLib, PCRE): +NOTE: you must use the correspondent mysql devel package. 
+ +* Ubuntu or Debian: apt-get install libglib2.0-dev libmysqlclient15-dev zlib1g-dev libpcre3-dev libssl-dev +* Fedora, RedHat and CentOS: yum install glib2-devel mysql-devel zlib-devel pcre-devel openssl-devel +* openSUSE: zypper install glib2-devel libmysqlclient-devel pcre-devel zlib-devel +* MacOSX: port install glib2 mysql5 pcre pkgconfig cmake + (You may want to run 'port select mysql mysql5' afterwards) + +One has to make sure, that pkg-config, mysql_config, pcre-config are all in $PATH + +Binlog dump is disabled by default to compile with it you need to add -DWITH_BINLOG=ON to cmake options + +== How does consistent snapshot work? == + +This is all done following best MySQL practices and traditions: + +* As a precaution, slow running queries on the server either abort the dump, or get killed +* Global write lock is acquired ("FLUSH TABLES WITH READ LOCK") +* Various metadata is read ("SHOW SLAVE STATUS","SHOW MASTER STATUS") +* Other threads connect and establish snapshots ("START TRANSACTION WITH CONSISTENT SNAPSHOT") +** On pre-4.1.8 it creates dummy InnoDB table, and reads from it. +* Once all worker threads announce the snapshot establishment, master executes "UNLOCK TABLES" and starts queueing jobs. + +This for now does not provide consistent snapshots for non-transactional engines - support for that is expected in 0.2 :) + +== How to exclude (or include) databases? == + +Once can use --regex functionality, for example not to dump mysql and test databases: + + mydumper --regex '^(?!(mysql|test))' + +Of course, regex functionality can be used to describe pretty much any list of tables. 
+ + diff --git a/binlog.c b/binlog.c new file mode 100644 index 0000000..0872375 --- /dev/null +++ b/binlog.c @@ -0,0 +1,284 @@ +/* + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + + Authors: Domas Mituzas, Facebook ( domas at fb dot com ) + Mark Leith, Oracle Corporation (mark dot leith at oracle dot com) + Andrew Hutchings, SkySQL (andrew at skysql dot com) + +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mydumper.h" +#include "binlog.h" + +#define BINLOG_MAGIC "\xfe\x62\x69\x6e" + +#define EVENT_HEADER_LENGTH 19 +#define EVENT_ROTATE_FIXED_LENGTH 8 + +enum event_postions { + EVENT_TIMESTAMP_POSITION= 0, + EVENT_TYPE_POSITION= 4, + EVENT_SERVERID_POSITION= 5, + EVENT_LENGTH_POSITION= 9, + EVENT_NEXT_POSITION= 13, + EVENT_FLAGS_POSITION= 17, + EVENT_EXTRA_FLAGS_POSITION= 19 // currently unused in v4 binlogs, but a good marker for end of header +}; + +enum event_type { + ROTATE_EVENT= 4, + FORMAT_DESCRIPTION_EVENT= 15, + EVENT_TOO_SHORT= 254 // arbitrary high number, in 5.1 the max event type number is 27 so this should be fine for a while +}; + +extern int compress_output; +extern gboolean daemon_mode; +extern gboolean shutdown_triggered; + +FILE *new_binlog_file(char *binlog_file, const char *binlog_dir); +void close_binlog_file(FILE *outfile); +char *rotate_file_name(const char *buf); + +void get_binlogs(MYSQL *conn, struct configuration *conf) { + // TODO: 
find logs we already have, use start position based on position of last log. + MYSQL_RES *result; + MYSQL_ROW row; + char* last_filename = NULL; + guint64 last_position; + + // Only snapshot dump the binlogs once in daemon mode + static gboolean got_binlogs= FALSE; + if (got_binlogs) + return; + else + got_binlogs= TRUE; + + if (mysql_query(conn, "SHOW MASTER STATUS")) { + g_critical("Error: Could not execute query: %s", mysql_error(conn)); + return; + } + + result = mysql_store_result(conn); + if ((row = mysql_fetch_row(result))) { + last_filename= g_strdup(row[0]); + last_position= strtoll(row[1], NULL, 10); + } else { + g_critical("Error: Could not obtain binary log stop position"); + if (last_filename != NULL) + g_free(last_filename); + return; + } + mysql_free_result(result); + + if (mysql_query(conn, "SHOW BINARY LOGS")) { + g_critical("Error: Could not execute query: %s", mysql_error(conn)); + if (last_filename != NULL) + g_free(last_filename); + return; + } + + + result = mysql_store_result(conn); + while ((row = mysql_fetch_row(result))) { + struct job *j = g_new0(struct job,1); + struct binlog_job *bj = g_new0(struct binlog_job,1); + j->job_data=(void*) bj; + bj->filename=g_strdup(row[0]); + bj->start_position=4; + bj->stop_position= (!strcasecmp(row[0], last_filename)) ? 
last_position : 0; + j->conf=conf; + j->type=JOB_BINLOG; + g_async_queue_push(conf->queue,j); + } + mysql_free_result(result); + if (last_filename != NULL) + g_free(last_filename); +} + +void get_binlog_file(MYSQL *conn, char *binlog_file, const char *binlog_directory, guint64 start_position, guint64 stop_position, gboolean continuous) { + // set serverID = max serverID - threadID to try an eliminate conflicts, + // 0 is bad because mysqld will disconnect at the end of the last log + // dupes aren't too bad since it is up to the client to check for them + uchar buf[128]; + // We need to read the raw network packets + NET* net; + net= &conn->net; + unsigned long len; + FILE* outfile; + guint32 event_type; + gboolean read_error= FALSE; + gboolean read_end= FALSE; + gboolean rotated= FALSE; + guint32 server_id= G_MAXUINT32 - mysql_thread_id(conn); + guint64 pos_counter= 0; + + int4store(buf, (guint32)start_position); + // Binlog flags (2 byte int) + int2store(buf + 4, 0); + // ServerID + int4store(buf + 6, server_id); + memcpy(buf + 10, binlog_file, strlen(binlog_file)); +#if MYSQL_VERSION_ID < 50100 + if (simple_command(conn, COM_BINLOG_DUMP, (const char *)buf, +#else + if (simple_command(conn, COM_BINLOG_DUMP, buf, +#endif + strlen(binlog_file) + 10, 1)) { + g_critical("Error: binlog: Critical error whilst requesting binary log"); + } + + while(1) { + outfile= new_binlog_file(binlog_file, binlog_directory); + if (outfile == NULL) { + g_critical("Error: binlog: Could not create binlog file '%s', %d", binlog_file, errno); + return; + } + + write_binlog(outfile, BINLOG_MAGIC, 4); + while(1) { + len = 0; + if (net->vio != 0) len=my_net_read(net); + if ((len == 0) || (len == ~(unsigned long) 0)) { + // Net timeout (set to 1 second) + if (mysql_errno(conn) == ER_NET_READ_INTERRUPTED) { + if (shutdown_triggered) { + close_binlog_file(outfile); + return; + } else { + continue; + } + // A real error + } else { + g_critical("Error: binlog: Network packet read error getting 
binlog file: %s", binlog_file); + close_binlog_file(outfile); + return; + } + } + if (len < 8 && net->read_pos[0]) { + // end of data + break; + } + pos_counter += len; + event_type= get_event((const char*)net->read_pos + 1, len -1); + switch (event_type) { + case EVENT_TOO_SHORT: + g_critical("Error: binlog: Event too short in binlog file: %s", binlog_file); + read_error= TRUE; + break; + case ROTATE_EVENT: + if (rotated) { + read_end= TRUE; + } else { + len= 1; + rotated= TRUE; + } + break; + default: + // if we get this far this is a normal event to record + break; + } + if (read_error) break; + write_binlog(outfile, (const char*)net->read_pos + 1, len - 1); + if (read_end) { + if (!continuous) { + break; + } else { + g_free(binlog_file); + binlog_file= rotate_file_name((const char*)net->read_pos + 1); + break; + } + } + // stop if we are at requested end of last log + if ((stop_position > 0) && (pos_counter >= stop_position)) break; + } + close_binlog_file(outfile); + if ((!continuous) || (!read_end)) break; + + if (continuous && read_end) { + read_end= FALSE; + rotated= FALSE; + } + } +} + +char *rotate_file_name(const char *buf) { + guint32 event_length= 0; + + // event length is 4 bytes at position 9 + event_length= uint4korr(&buf[EVENT_LENGTH_POSITION]); + // event length includes the header, plus a rotate event has a fixed 8byte part we don't need + event_length= event_length - EVENT_HEADER_LENGTH - EVENT_ROTATE_FIXED_LENGTH; + + return g_strndup(&buf[EVENT_HEADER_LENGTH + EVENT_ROTATE_FIXED_LENGTH], event_length); +} + +FILE *new_binlog_file(char *binlog_file, const char *binlog_dir) { + FILE *outfile; + char* filename; + + if (!compress_output) { + filename= g_strdup_printf("%s/%s", binlog_dir, binlog_file); + outfile= g_fopen(filename, "w"); + } else { + filename= g_strdup_printf("%s/%s.gz", binlog_dir, binlog_file); + outfile= (void*) gzopen(filename, "w"); + } + g_free(filename); + + return outfile; +} + +void close_binlog_file(FILE *outfile) { + if 
(!compress_output) + fclose(outfile); + else + gzclose((gzFile) outfile); +} + +unsigned int get_event(const char *buf, unsigned int len) { + if (len < EVENT_TYPE_POSITION) + return EVENT_TOO_SHORT; + return buf[EVENT_TYPE_POSITION]; + + // TODO: Would be good if we can check for valid event type, unfortunately this check can change from version to version +} + +void write_binlog(FILE* file, const char* data, guint64 len) { + int err; + + if (len > 0) { + int write_result; + + if (!compress_output) + write_result= write(fileno(file), data, len); + else + write_result= gzwrite((gzFile)file, data, len); + + if (write_result <= 0) { + if (!compress_output) + g_critical("Error: binlog: Error writing binary log: %s", strerror(errno)); + else + g_critical("Error: binlog: Error writing compressed binary log: %s", gzerror((gzFile)file, &err)); + } + } +} diff --git a/binlog.h b/binlog.h new file mode 100644 index 0000000..863d8d6 --- /dev/null +++ b/binlog.h @@ -0,0 +1,30 @@ +/* + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . 
+ + Authors: Domas Mituzas, Facebook ( domas at fb dot com ) + Mark Leith, Oracle Corporation (mark dot leith at oracle dot com) + Andrew Hutchings, SkySQL (andrew at skysql dot com) + +*/ + +#ifndef _binlog_h +#define _binlog_h +#include "mydumper.h" + +void get_binlogs(MYSQL *conn, struct configuration *conf); +void get_binlog_file(MYSQL *conn, char *binlog_file, const char *binlog_directory, guint64 start_position, guint64 stop_position, gboolean continuous); +unsigned int get_event(const char *buf, unsigned int len); +void write_binlog(FILE* file, const char* data, guint64 len); + +#endif diff --git a/cmake/modules/CppcheckTargets.cmake b/cmake/modules/CppcheckTargets.cmake new file mode 100644 index 0000000..b554f33 --- /dev/null +++ b/cmake/modules/CppcheckTargets.cmake @@ -0,0 +1,214 @@ +# - Run cppcheck on c++ source files as a custom target and a test +# +# include(CppcheckTargets) +# add_cppcheck( [UNUSED_FUNCTIONS] [STYLE] [POSSIBLE_ERROR] [FAIL_ON_WARNINGS]) - +# Create a target to check a target's sources with cppcheck and the indicated options +# add_cppcheck_sources( [UNUSED_FUNCTIONS] [STYLE] [POSSIBLE_ERROR] [FAIL_ON_WARNINGS]) - +# Create a target to check standalone sources with cppcheck and the indicated options +# +# Requires these CMake modules: +# Findcppcheck +# +# Requires CMake 2.6 or newer (uses the 'function' command) +# +# Original Author: +# 2009-2010 Ryan Pavlik +# http://academic.cleardefinition.com +# Iowa State University HCI Graduate Program/VRAC +# +# Copyright Iowa State University 2009-2010. +# Distributed under the Boost Software License, Version 1.0. 
+# (See accompanying file LICENSE_1_0.txt or copy at +# http://www.boost.org/LICENSE_1_0.txt) + +if(__add_cppcheck) + return() +endif() +set(__add_cppcheck YES) + +if(NOT CPPCHECK_FOUND) + find_package(cppcheck QUIET) +endif() + +if(CPPCHECK_FOUND) + if(NOT TARGET all_cppcheck) + add_custom_target(all_cppcheck) + set_target_properties(all_cppcheck PROPERTIES EXCLUDE_FROM_ALL TRUE) + endif() +endif() + +function(add_cppcheck_sources _targetname) + if(CPPCHECK_FOUND) + set(_cppcheck_args) + set(_input ${ARGN}) + list(FIND _input UNUSED_FUNCTIONS _unused_func) + if("${_unused_func}" GREATER "-1") + list(APPEND _cppcheck_args ${CPPCHECK_UNUSEDFUNC_ARG}) + list(REMOVE_AT _input ${_unused_func}) + endif() + + list(FIND _input STYLE _style) + if("${_style}" GREATER "-1") + list(APPEND _cppcheck_args ${CPPCHECK_STYLE_ARG}) + list(REMOVE_AT _input ${_style}) + endif() + + list(FIND _input POSSIBLE_ERROR _poss_err) + if("${_poss_err}" GREATER "-1") + list(APPEND _cppcheck_args ${CPPCHECK_POSSIBLEERROR_ARG}) + list(REMOVE_AT _input ${_poss_err}) + endif() + + list(FIND _input FAIL_ON_WARNINGS _fail_on_warn) + if("${_fail_on_warn}" GREATER "-1") + list(APPEND + CPPCHECK_FAIL_REGULAR_EXPRESSION + ${CPPCHECK_WARN_REGULAR_EXPRESSION}) + list(REMOVE_AT _input ${_fail_on_warn}) + endif() + + set(_files) + foreach(_source ${_input}) + get_source_file_property(_cppcheck_loc "${_source}" LOCATION) + if(_cppcheck_loc) + # This file has a source file property, carry on. + get_source_file_property(_cppcheck_lang "${_source}" LANGUAGE) + if("${_cppcheck_lang}" MATCHES "C") + list(APPEND _files "${_cppcheck_loc}") + endif() + else() + # This file doesn't have source file properties - figure it out. 
+ get_filename_component(_cppcheck_loc "${_source}" ABSOLUTE) + if(EXISTS "${_cppcheck_loc}") + list(APPEND _files "${_cppcheck_loc}") + else() + message(FATAL_ERROR + "Adding CPPCHECK for file target ${_targetname}: " + "File ${_source} does not exist or needs a corrected path location " + "since we think its absolute path is ${_cppcheck_loc}") + endif() + endif() + endforeach() + + if("1.${CMAKE_VERSION}" VERSION_LESS "1.2.8.0") + # Older than CMake 2.8.0 + add_test(${_targetname}_cppcheck_test + "${CPPCHECK_EXECUTABLE}" + ${CPPCHECK_TEMPLATE_ARG} + ${_cppcheck_args} + ${_files}) + else() + # CMake 2.8.0 and newer + add_test(NAME + ${_targetname}_cppcheck_test + COMMAND + "${CPPCHECK_EXECUTABLE}" + ${CPPCHECK_TEMPLATE_ARG} + ${_cppcheck_args} + ${_files}) + endif() + + set_tests_properties(${_targetname}_cppcheck_test + PROPERTIES + FAIL_REGULAR_EXPRESSION + "${CPPCHECK_FAIL_REGULAR_EXPRESSION}") + + add_custom_command(TARGET + all_cppcheck + PRE_BUILD + COMMAND + ${CPPCHECK_EXECUTABLE} + ${CPPCHECK_QUIET_ARG} + ${CPPCHECK_TEMPLATE_ARG} + ${_cppcheck_args} + ${_files} + WORKING_DIRECTORY + "${CMAKE_CURRENT_SOURCE_DIR}" + COMMENT + "${_targetname}_cppcheck: Running cppcheck on target ${_targetname}..." 
+ VERBATIM) + endif() +endfunction() + +function(add_cppcheck _name) + if(NOT TARGET ${_name}) + message(FATAL_ERROR + "add_cppcheck given a target name that does not exist: '${_name}' !") + endif() + if(CPPCHECK_FOUND) + set(_cppcheck_args) + + list(FIND ARGN UNUSED_FUNCTIONS _unused_func) + if("${_unused_func}" GREATER "-1") + list(APPEND _cppcheck_args ${CPPCHECK_UNUSEDFUNC_ARG}) + endif() + + list(FIND ARGN STYLE _style) + if("${_style}" GREATER "-1") + list(APPEND _cppcheck_args ${CPPCHECK_STYLE_ARG}) + endif() + + list(FIND ARGN POSSIBLE_ERROR _poss_err) + if("${_poss_err}" GREATER "-1") + list(APPEND _cppcheck_args ${CPPCHECK_POSSIBLEERROR_ARG}) + endif() + + list(FIND _input FAIL_ON_WARNINGS _fail_on_warn) + if("${_fail_on_warn}" GREATER "-1") + list(APPEND + CPPCHECK_FAIL_REGULAR_EXPRESSION + ${CPPCHECK_WARN_REGULAR_EXPRESSION}) + list(REMOVE_AT _input ${_unused_func}) + endif() + + get_target_property(_cppcheck_sources "${_name}" SOURCES) + set(_files) + foreach(_source ${_cppcheck_sources}) + get_source_file_property(_cppcheck_lang "${_source}" LANGUAGE) + get_source_file_property(_cppcheck_loc "${_source}" LOCATION) + if("${_cppcheck_lang}" MATCHES "C") + list(APPEND _files "${_cppcheck_loc}") + endif() + endforeach() + + if("1.${CMAKE_VERSION}" VERSION_LESS "1.2.8.0") + # Older than CMake 2.8.0 + add_test(${_name}_cppcheck_test + "${CPPCHECK_EXECUTABLE}" + ${CPPCHECK_TEMPLATE_ARG} + ${_cppcheck_args} + ${_files}) + else() + # CMake 2.8.0 and newer + add_test(NAME + ${_name}_cppcheck_test + COMMAND + "${CPPCHECK_EXECUTABLE}" + ${CPPCHECK_TEMPLATE_ARG} + ${_cppcheck_args} + ${_files}) + endif() + + set_tests_properties(${_name}_cppcheck_test + PROPERTIES + FAIL_REGULAR_EXPRESSION + "${CPPCHECK_FAIL_REGULAR_EXPRESSION}") + + add_custom_command(TARGET + all_cppcheck + PRE_BUILD + COMMAND + ${CPPCHECK_EXECUTABLE} + ${CPPCHECK_QUIET_ARG} + ${CPPCHECK_TEMPLATE_ARG} + "--enable=style,information,unusedFunction" + ${_cppcheck_args} + ${_files} + 
WORKING_DIRECTORY + "${CMAKE_CURRENT_SOURCE_DIR}" + COMMENT + "${_name}_cppcheck: Running cppcheck on target ${_name}..." + VERBATIM) + endif() + +endfunction() diff --git a/cmake/modules/FindGLIB2.cmake b/cmake/modules/FindGLIB2.cmake new file mode 100644 index 0000000..e50cf58 --- /dev/null +++ b/cmake/modules/FindGLIB2.cmake @@ -0,0 +1,22 @@ +# - Try to find the GLIB2 libraries + +if(GLIB2_INCLUDE_DIR AND GLIB2_LIBRARIES AND GTHREAD2_LIBRARIES) + # Already in cache, be silent + set(GLIB2_FIND_QUIETLY TRUE) +endif(GLIB2_INCLUDE_DIR AND GLIB2_LIBRARIES AND GTHREAD2_LIBRARIES) + +if (NOT WIN32) + include(FindPkgConfig) + pkg_search_module(PC_GLIB2 REQUIRED glib-2.0) + pkg_search_module(PC_GTHREAD2 REQUIRED gthread-2.0) +endif(NOT WIN32) + +set(GLIB2_INCLUDE_DIR ${PC_GLIB2_INCLUDE_DIRS}) + +find_library(GLIB2_LIBRARIES NAMES glib-2.0 HINTS ${PC_GLIB2_LIBDIR} ${PC_GLIB2_LIBRARY_DIRS}) + +find_library(GTHREAD2_LIBRARIES NAMES gthread-2.0 HINTS ${PC_GTHREAD2_LIBDIR} ${PC_GTHREAD2_LIBRARY_DIRS}) + + +mark_as_advanced(GLIB2_INCLUDE_DIR GLIB2_LIBRARIES GTHREAD2_LIBRARIES) + diff --git a/cmake/modules/FindMySQL.cmake b/cmake/modules/FindMySQL.cmake new file mode 100644 index 0000000..ef64646 --- /dev/null +++ b/cmake/modules/FindMySQL.cmake @@ -0,0 +1,111 @@ +# - Find MySQL +# Find the MySQL includes and client library +# This module defines +# MYSQL_INCLUDE_DIR, where to find mysql.h +# MYSQL_LIBRARIES, the libraries needed to use MySQL. +# MYSQL_FOUND, If false, do not try to use MySQL. +# +# Copyright (c) 2006, Jaroslaw Staniek, +# Lot of adustmens by Michal Cihar +# +# vim: expandtab sw=4 ts=4 sts=4: +# +# Redistribution and use is allowed according to the terms of the BSD license. 
+ +if(UNIX) + set(MYSQL_CONFIG_PREFER_PATH "$ENV{MYSQL_HOME}/bin" CACHE FILEPATH + "preferred path to MySQL (mysql_config)") + find_program(MYSQL_CONFIG mysql_config + ${MYSQL_CONFIG_PREFER_PATH} + /usr/local/mysql/bin/ + /usr/local/bin/ + /usr/bin/ + ) + + if(MYSQL_CONFIG) + message(STATUS "Using mysql-config: ${MYSQL_CONFIG}") + # set CFLAGS + exec_program(${MYSQL_CONFIG} + ARGS --cflags + OUTPUT_VARIABLE MY_TMP) + + set(MYSQL_CFLAGS ${MY_TMP} CACHE STRING INTERNAL) + + # set INCLUDE_DIR + exec_program(${MYSQL_CONFIG} + ARGS --include + OUTPUT_VARIABLE MY_TMP) + + string(REGEX REPLACE "-I([^ ]*)( .*)?" "\\1" MY_TMP "${MY_TMP}") + + set(MYSQL_ADD_INCLUDE_DIR ${MY_TMP} CACHE FILEPATH INTERNAL) + + # set LIBRARY_DIR + exec_program(${MYSQL_CONFIG} + ARGS --libs_r + OUTPUT_VARIABLE MY_TMP) + + set(MYSQL_ADD_LIBRARIES "") + + # prepend space in order to match separate words only (e.g. rather + # than "-linux" from within "-L/usr/lib/i386-linux-gnu") + string(REGEX MATCHALL " +-l[^ ]*" MYSQL_LIB_LIST " ${MY_TMP}") + foreach(MY_LIB ${MYSQL_LIB_LIST}) + string(REGEX REPLACE "[ ]*-l([^ ]*)" "\\1" MY_LIB "${MY_LIB}") + list(APPEND MYSQL_ADD_LIBRARIES "${MY_LIB}") + endforeach(MY_LIB ${MYSQL_LIBS}) + + set(MYSQL_ADD_LIBRARY_PATH "") + + string(REGEX MATCHALL " +-L[^ ]*" MYSQL_LIBDIR_LIST " ${MY_TMP}") + foreach(MY_LIB ${MYSQL_LIBDIR_LIST}) + string(REGEX REPLACE "[ ]*-L([^ ]*)" "\\1" MY_LIB "${MY_LIB}") + list(APPEND MYSQL_ADD_LIBRARY_PATH "${MY_LIB}") + endforeach(MY_LIB ${MYSQL_LIBS}) + + else(MYSQL_CONFIG) + set(MYSQL_ADD_LIBRARIES "") + list(APPEND MYSQL_ADD_LIBRARIES "mysqlclient") + endif(MYSQL_CONFIG) +else(UNIX) + set(MYSQL_ADD_INCLUDE_DIR "c:/msys/local/include" CACHE FILEPATH INTERNAL) + set(MYSQL_ADD_LIBRARY_PATH "c:/msys/local/lib" CACHE FILEPATH INTERNAL) +ENDIF(UNIX) + +find_path(MYSQL_INCLUDE_DIR mysql.h + ${MYSQL_ADD_INCLUDE_DIR} + /usr/local/include + /usr/local/include/mysql + /usr/local/mysql/include + /usr/local/mysql/include/mysql + /usr/include + 
/usr/include/mysql + /usr/include/mysql/private +) + +set(TMP_MYSQL_LIBRARIES "") +set(CMAKE_FIND_LIBRARY_SUFFIXES .so .a .lib) +foreach(MY_LIB ${MYSQL_ADD_LIBRARIES}) + find_library("MYSQL_LIBRARIES_${MY_LIB}" NAMES ${MY_LIB} + HINTS + ${MYSQL_ADD_LIBRARY_PATH} + /usr/lib/mysql + /usr/lib + /usr/local/lib + /usr/local/lib/mysql + /usr/local/mysql/lib + ) + list(APPEND TMP_MYSQL_LIBRARIES "${MYSQL_LIBRARIES_${MY_LIB}}") +endforeach(MY_LIB ${MYSQL_ADD_LIBRARIES}) + +set(MYSQL_LIBRARIES ${TMP_MYSQL_LIBRARIES} CACHE FILEPATH INTERNAL) + +if(MYSQL_INCLUDE_DIR AND MYSQL_LIBRARIES) + set(MYSQL_FOUND TRUE CACHE INTERNAL "MySQL found") + message(STATUS "Found MySQL: ${MYSQL_INCLUDE_DIR}, ${MYSQL_LIBRARIES}") +else(MYSQL_INCLUDE_DIR AND MYSQL_LIBRARIES) + set(MYSQL_FOUND FALSE CACHE INTERNAL "MySQL found") + message(STATUS "MySQL not found.") +endif(MYSQL_INCLUDE_DIR AND MYSQL_LIBRARIES) + +mark_as_advanced(MYSQL_INCLUDE_DIR MYSQL_LIBRARIES MYSQL_CFLAGS) diff --git a/cmake/modules/FindPCRE.cmake b/cmake/modules/FindPCRE.cmake new file mode 100644 index 0000000..8fd9178 --- /dev/null +++ b/cmake/modules/FindPCRE.cmake @@ -0,0 +1,45 @@ +# - Try to find the PCRE regular expression library +# Once done this will define +# +# PCRE_FOUND - system has the PCRE library +# PCRE_INCLUDE_DIR - the PCRE include directory +# PCRE_LIBRARIES - The libraries needed to use PCRE + +# Copyright (c) 2006, Alexander Neundorf, +# +# Redistribution and use is allowed according to the terms of the BSD license. +# For details see the accompanying COPYING-CMAKE-SCRIPTS file. 
+ + +if (PCRE_INCLUDE_DIR AND PCRE_PCREPOSIX_LIBRARY AND PCRE_PCRE_LIBRARY) + # Already in cache, be silent + set(PCRE_FIND_QUIETLY TRUE) +endif (PCRE_INCLUDE_DIR AND PCRE_PCREPOSIX_LIBRARY AND PCRE_PCRE_LIBRARY) + + +if (NOT WIN32) + # use pkg-config to get the directories and then use these values + # in the FIND_PATH() and FIND_LIBRARY() calls + find_package(PkgConfig) + + pkg_check_modules(PC_PCRE REQUIRED libpcre) + + set(PCRE_DEFINITIONS ${PC_PCRE_CFLAGS_OTHER}) + +endif (NOT WIN32) + +find_path(PCRE_INCLUDE_DIR pcre.h + HINTS ${PC_PCRE_INCLUDEDIR} ${PC_PCRE_INCLUDE_DIRS} + PATH_SUFFIXES pcre) + +find_library(PCRE_PCRE_LIBRARY NAMES pcre HINTS ${PC_PCRE_LIBDIR} ${PC_PCRE_LIBRARY_DIRS}) + +find_library(PCRE_PCREPOSIX_LIBRARY NAMES pcreposix HINTS ${PC_PCRE_LIBDIR} ${PC_PCRE_LIBRARY_DIRS}) + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(PCRE DEFAULT_MSG PCRE_INCLUDE_DIR PCRE_PCRE_LIBRARY PCRE_PCREPOSIX_LIBRARY ) + +set(PCRE_LIBRARIES ${PCRE_PCRE_LIBRARY} ${PCRE_PCREPOSIX_LIBRARY}) + +mark_as_advanced(PCRE_INCLUDE_DIR PCRE_LIBRARIES PCRE_PCREPOSIX_LIBRARY PCRE_PCRE_LIBRARY) + diff --git a/cmake/modules/FindSphinx.cmake b/cmake/modules/FindSphinx.cmake new file mode 100644 index 0000000..6c3417a --- /dev/null +++ b/cmake/modules/FindSphinx.cmake @@ -0,0 +1,57 @@ +# - This module looks for Sphinx +# Find the Sphinx documentation generator +# +# This modules defines +# SPHINX_EXECUTABLE +# SPHINX_FOUND +# SPHINX_MAJOR_VERSION +# SPHINX_MINOR_VERSION +# SPHINX_VERSION + +#============================================================================= +# Copyright 2002-2009 Kitware, Inc. +# Copyright 2009-2011 Peter Colberg +# +# Distributed under the OSI-approved BSD License (the "License"); +# see accompanying file COPYING-CMAKE-SCRIPTS for details. +# +# This software is distributed WITHOUT ANY WARRANTY; without even the +# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the License for more information. 
+#============================================================================= +# (To distribute this file outside of CMake, substitute the full +# License text for the above reference.) + +find_program(SPHINX_EXECUTABLE NAMES sphinx-build + HINTS + $ENV{SPHINX_DIR} + PATH_SUFFIXES bin + DOC "Sphinx documentation generator" +) + +include(FindPackageHandleStandardArgs) + +find_package_handle_standard_args(Sphinx DEFAULT_MSG + SPHINX_EXECUTABLE +) + +if (SPHINX_EXECUTABLE) + execute_process ( + COMMAND "${SPHINX_EXECUTABLE}" -h + OUTPUT_VARIABLE _SPHINX_VERSION_OUTPUT + ERROR_VARIABLE _SPHINX_VERSION_OUTPUT + ) + if (_SPHINX_VERSION_OUTPUT MATCHES "Sphinx v([0-9]+\\.[0-9]+\\.[0-9]+)") + set (SPHINX_VERSION "${CMAKE_MATCH_1}") + string (REPLACE "." ";" _SPHINX_VERSION_LIST "${SPHINX_VERSION}") + list (GET _SPHINX_VERSION_LIST 0 SPHINX_MAJOR_VERSION) + list (GET _SPHINX_VERSION_LIST 1 SPHINX_MINOR_VERSION) + # patch version meh :) + endif() +endif() + +message("${SPHINX_MAJOR_VERSION}") + +mark_as_advanced( + SPHINX_EXECUTABLE +) diff --git a/cmake/modules/Findcppcheck.cmake b/cmake/modules/Findcppcheck.cmake new file mode 100644 index 0000000..a044203 --- /dev/null +++ b/cmake/modules/Findcppcheck.cmake @@ -0,0 +1,142 @@ +# - try to find cppcheck tool +# +# Cache Variables: +# CPPCHECK_EXECUTABLE +# +# Non-cache variables you might use in your CMakeLists.txt: +# CPPCHECK_FOUND +# CPPCHECK_POSSIBLEERROR_ARG +# CPPCHECK_UNUSEDFUNC_ARG +# CPPCHECK_STYLE_ARG +# CPPCHECK_QUIET_ARG +# CPPCHECK_INCLUDEPATH_ARG +# CPPCHECK_FAIL_REGULAR_EXPRESSION +# CPPCHECK_WARN_REGULAR_EXPRESSION +# CPPCHECK_MARK_AS_ADVANCED - whether to mark our vars as advanced even +# if we don't find this program. +# +# Requires these CMake modules: +# FindPackageHandleStandardArgs (known included with CMake >=2.6.2) +# +# Original Author: +# 2009-2010 Ryan Pavlik +# http://academic.cleardefinition.com +# Iowa State University HCI Graduate Program/VRAC +# +# Copyright Iowa State University 2009-2010. 
+# Distributed under the Boost Software License, Version 1.0. +# (See accompanying file LICENSE_1_0.txt or copy at +# http://www.boost.org/LICENSE_1_0.txt) + +file(TO_CMAKE_PATH "${CPPCHECK_ROOT_DIR}" CPPCHECK_ROOT_DIR) +set(CPPCHECK_ROOT_DIR + "${CPPCHECK_ROOT_DIR}" + CACHE + PATH + "Path to search for cppcheck") + +# cppcheck app bundles on Mac OS X are GUI, we want command line only +set(_oldappbundlesetting ${CMAKE_FIND_APPBUNDLE}) +set(CMAKE_FIND_APPBUNDLE NEVER) + +# If we have a custom path, look there first. +if(CPPCHECK_ROOT_DIR) + find_program(CPPCHECK_EXECUTABLE + NAMES + cppcheck + cli + PATHS + "${CPPCHECK_ROOT_DIR}" + PATH_SUFFIXES + cli + NO_DEFAULT_PATH) +endif() + +find_program(CPPCHECK_EXECUTABLE NAMES cppcheck) + +# Restore original setting for appbundle finding +set(CMAKE_FIND_APPBUNDLE ${_oldappbundlesetting}) + +if(CPPCHECK_EXECUTABLE) + # Find out where our test file is + get_filename_component(_cppcheckmoddir ${CMAKE_CURRENT_LIST_FILE} PATH) + set(_cppcheckdummyfile "${_cppcheckmoddir}/Findcppcheck.cpp") + + # Check for the two types of command line arguments by just trying them + execute_process(COMMAND + "${CPPCHECK_EXECUTABLE}" + "--enable=style" + "--quiet" + "${_cppcheckdummyfile}" + RESULT_VARIABLE + _cppcheck_new_result + OUTPUT_QUIET + ERROR_QUIET) + execute_process(COMMAND + "${CPPCHECK_EXECUTABLE}" + "--style" + "--quiet" + "${_cppcheckdummyfile}" + RESULT_VARIABLE + _cppcheck_old_result + OUTPUT_QUIET + ERROR_QUIET) + if("${_cppcheck_new_result}" EQUAL 0) + # New arguments + set(CPPCHECK_UNUSEDFUNC_ARG "--enable=unusedFunctions") + set(CPPCHECK_POSSIBLEERROR_ARG "--enable=possibleError") + set(CPPCHECK_STYLE_ARG "--enable=style") + set(CPPCHECK_QUIET_ARG "--quiet") + set(CPPCHECK_INCLUDEPATH_ARG "-I") + if(MSVC) + set(CPPCHECK_TEMPLATE_ARG --template vs) + set(CPPCHECK_FAIL_REGULAR_EXPRESSION "[(]error[)]") + set(CPPCHECK_WARN_REGULAR_EXPRESSION "[(]style[)]") + elseif(CMAKE_COMPILER_IS_GNUCXX) + set(CPPCHECK_TEMPLATE_ARG 
--template gcc) + set(CPPCHECK_FAIL_REGULAR_EXPRESSION " error: ") + set(CPPCHECK_WARN_REGULAR_EXPRESSION " style: ") + else() + message(STATUS + "Warning: FindCppcheck doesn't know how to format error messages for your compiler!") + set(CPPCHECK_TEMPLATE_ARG --template gcc) + set(CPPCHECK_FAIL_REGULAR_EXPRESSION " error: ") + set(CPPCHECK_WARN_REGULAR_EXPRESSION " style: ") + endif() + elseif("${_cppcheck_old_result}" EQUAL 0) + # Old arguments + set(CPPCHECK_UNUSEDFUNC_ARG "--unused-functions") + set(CPPCHECK_POSSIBLEERROR_ARG "--all") + set(CPPCHECK_STYLE_ARG "--style") + set(CPPCHECK_QUIET_ARG "--quiet") + set(CPPCHECK_INCLUDEPATH_ARG "-I") + set(CPPCHECK_FAIL_REGULAR_EXPRESSION "error:") + set(CPPCHECK_WARN_REGULAR_EXPRESSION "[(]style[)]") + else() + # No idea - some other issue must be getting in the way + message(STATUS + "WARNING: Can't detect whether CPPCHECK wants new or old-style arguments!") + endif() + + +endif() + +set(CPPCHECK_ALL + "${CPPCHECK_EXECUTABLE} ${CPPCHECK_POSSIBLEERROR_ARG} ${CPPCHECK_UNUSEDFUNC_ARG} ${CPPCHECK_STYLE_ARG} ${CPPCHECK_QUIET_ARG} ${CPPCHECK_INCLUDEPATH_ARG} some/include/path") + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(cppcheck + DEFAULT_MSG + CPPCHECK_ALL + CPPCHECK_EXECUTABLE + CPPCHECK_POSSIBLEERROR_ARG + CPPCHECK_UNUSEDFUNC_ARG + CPPCHECK_STYLE_ARG + CPPCHECK_INCLUDEPATH_ARG + CPPCHECK_QUIET_ARG) + +if(CPPCHECK_FOUND OR CPPCHECK_MARK_AS_ADVANCED) + mark_as_advanced(CPPCHECK_ROOT_DIR) +endif() + +mark_as_advanced(CPPCHECK_EXECUTABLE) diff --git a/cmake/modules/Findcppcheck.cpp b/cmake/modules/Findcppcheck.cpp new file mode 100644 index 0000000..84350db --- /dev/null +++ b/cmake/modules/Findcppcheck.cpp @@ -0,0 +1,16 @@ +/** + * \file Findcppcheck.cpp + * \brief Dummy C++ source file used by CMake module Findcppcheck.cmake + * + * \author + * Ryan Pavlik, 2009-2010 + * + * http://academic.cleardefinition.com/ + * + */ + + + +int main(int argc, char* argv[]) { + return 0; +} diff --git 
a/common.h b/common.h new file mode 100644 index 0000000..63528a5 --- /dev/null +++ b/common.h @@ -0,0 +1,45 @@ +/* + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + + Authors: Andrew Hutchings, SkySQL (andrew at skysql dot com) +*/ +#ifndef _common_h +#define _common_h + +char *hostname=NULL; +char *username=NULL; +char *password=NULL; +char *socket_path=NULL; +char *db=NULL; +guint port=3306; +guint num_threads= 4; +guint verbose=2; +gboolean compress_protocol= FALSE; +gboolean program_version= FALSE; + +GOptionEntry common_entries[] = +{ + { "host", 'h', 0, G_OPTION_ARG_STRING, &hostname, "The host to connect to", NULL }, + { "user", 'u', 0, G_OPTION_ARG_STRING, &username, "Username with privileges to run the dump", NULL }, + { "password", 'p', 0, G_OPTION_ARG_STRING, &password, "User password", NULL }, + { "port", 'P', 0, G_OPTION_ARG_INT, &port, "TCP/IP port to connect to", NULL }, + { "socket", 'S', 0, G_OPTION_ARG_STRING, &socket_path, "UNIX domain socket file to use for connection", NULL }, + { "threads", 't', 0, G_OPTION_ARG_INT, &num_threads, "Number of threads to use, default 4", NULL }, + { "compress-protocol", 'C', 0, G_OPTION_ARG_NONE, &compress_protocol, "Use compression on the MySQL connection", NULL }, + { "version", 'V', 0, G_OPTION_ARG_NONE, &program_version, "Show the program version and exit", NULL }, + { "verbose", 'v', 0, G_OPTION_ARG_INT, &verbose, "Verbosity of output, 0 = silent, 1 = errors, 2 = 
warnings, 3 = info, default 2", NULL }, + { NULL, 0, 0, G_OPTION_ARG_NONE, NULL, NULL, NULL } +}; + +#endif diff --git a/config.h.in b/config.h.in new file mode 100644 index 0000000..0c76528 --- /dev/null +++ b/config.h.in @@ -0,0 +1,7 @@ +#ifndef CONFIG_H +#define CONFIG_H + +#cmakedefine VERSION "@VERSION@" +#cmakedefine WITH_BINLOG + +#endif diff --git a/docs/CMakeLists.txt b/docs/CMakeLists.txt new file mode 100644 index 0000000..8fdb6b4 --- /dev/null +++ b/docs/CMakeLists.txt @@ -0,0 +1,156 @@ +# Generate documentation in HTML and PDF format using Sphinx. + +set(GENERATE_DOC TRUE) + +# We use the Sphinx documentation generator to render HTML and manual +# pages from the user and reference documentation in ReST format. +find_package(Sphinx QUIET) +if(NOT SPHINX_FOUND) + message(WARNING "Unable to find Sphinx documentation generator") + set(GENERATE_DOC FALSE) +endif(NOT SPHINX_FOUND) + +if(SPHINX_MAJOR_VERSION LESS 1) + message(WARNING "Sphinx is older than v1.0, not building docs") + set(GENERATE_DOC FALSE) +endif(SPHINX_MAJOR_VERSION LESS 1) + +if(GENERATE_DOC) + # documentation tools + set(SOURCE_BUILD_DIR "${CMAKE_CURRENT_SOURCE_DIR}/_build") + # configured documentation tools and intermediate build results + set(BINARY_BUILD_DIR "${CMAKE_CURRENT_BINARY_DIR}/_build") + # static ReST documentation sources + set(SOURCES_DIR "${CMAKE_CURRENT_BINARY_DIR}/_sources") + # generated ReST documentation sources + set(REF_SOURCES_DIR "${SOURCES_DIR}/reference") + # master document with modules index + set(REF_MASTER_DOC "modules") + + # substitute variables in configuration and scripts + foreach(file + conf.py + sources.cmake + ) + configure_file( + "${SOURCE_BUILD_DIR}/${file}.in" + "${BINARY_BUILD_DIR}/${file}" + @ONLY + ) + endforeach(file) + + set(CLEAN_FILES + "${BINARY_BUILD_DIR}/html" + ) + + add_custom_target(ALL + DEPENDS "${REF_SOURCES_DIR}/${REF_MASTER_DOC}.rst" + ) + + # Sphinx requires all sources in the same directory tree. 
As we wish + # to include generated reference documention from the build tree, we + # copy static ReST documents to the build tree before calling Sphinx. + add_custom_target(doc_sources ALL + "${CMAKE_COMMAND}" -P "${BINARY_BUILD_DIR}/sources.cmake" + ) + list(APPEND CLEAN_FILES + "${SOURCES_DIR}" + ) + + # note the trailing slash to exclude directory name + install(DIRECTORY "${SOURCES_DIR}/" + DESTINATION "share/doc/mydumper" + ) + + # Sphinx cache with pickled ReST documents + set(SPHINX_CACHE_DIR "${CMAKE_CURRENT_BINARY_DIR}/_doctrees") + # HTML output directory + set(SPHINX_HTML_DIR "${CMAKE_CURRENT_BINARY_DIR}/html") + + # This target builds HTML documentation using Sphinx. + add_custom_target(doc_html ALL + ${SPHINX_EXECUTABLE} + -q -b html + -c "${BINARY_BUILD_DIR}" + -d "${SPHINX_CACHE_DIR}" + "${SOURCES_DIR}" + "${SPHINX_HTML_DIR}" + COMMENT "Building HTML documentation with Sphinx" + ) + list(APPEND CLEAN_FILES + "${SPHINX_CACHE_DIR}" + "${SPHINX_HTML_DIR}" + ) + add_dependencies(doc_html + doc_sources + ) + install(DIRECTORY "${SPHINX_HTML_DIR}" + DESTINATION "share/doc/mydumper" + ) + + # HTML output directory + set(SPHINX_MAN_DIR "${CMAKE_CURRENT_BINARY_DIR}/man") + # This target builds a manual page using Sphinx. + + add_custom_target(doc_man ALL + ${SPHINX_EXECUTABLE} + -q -b man + -c "${BINARY_BUILD_DIR}" + -d "${SPHINX_CACHE_DIR}" + "${SOURCES_DIR}" + "${SPHINX_MAN_DIR}" + COMMENT "Building manual page with Sphinx" + ) + list(APPEND CLEAN_FILES + "${SPHINX_MAN_DIR}" + ) + add_dependencies(doc_man + doc_sources + ) + # serialize Sphinx targets to avoid cache conflicts in parallel builds + add_dependencies(doc_man + doc_html + ) + install(FILES "${SPHINX_MAN_DIR}/mydumper.1" "${SPHINX_MAN_DIR}/myloader.1" + DESTINATION "share/man/man1" + ) + + # This target builds PDF documentation using Sphinx and LaTeX. 
+ if(PDFLATEX_COMPILER) + # PDF output directory + set(SPHINX_PDF_DIR "${CMAKE_CURRENT_BINARY_DIR}/pdf") + + add_custom_target(doc_pdf ALL + ${SPHINX_EXECUTABLE} + -q -b latex + -c "${BINARY_BUILD_DIR}" + -d "${SPHINX_CACHE_DIR}" + "${SOURCES_DIR}" + "${SPHINX_PDF_DIR}" + COMMENT "Building PDF documentation with Sphinx" + ) + add_custom_command(TARGET doc_pdf POST_BUILD + COMMAND ${CMAKE_MAKE_PROGRAM} LATEXOPTS=-interaction=batchmode + WORKING_DIRECTORY "${SPHINX_PDF_DIR}" + ) + list(APPEND CLEAN_FILES + "${SPHINX_PDF_DIR}" + ) + add_dependencies(doc_pdf + doc_sources + ) + # serialize Sphinx targets to avoid cache conflicts in parallel builds + add_dependencies(doc_pdf + doc_man + ) + install(FILES "${SPHINX_PDF_DIR}/mydumper.pdf" + DESTINATION "share/doc/mydumper" + ) + endif(PDFLATEX_COMPILER) + + # Add output directories to clean target. + set_directory_properties(PROPERTIES + ADDITIONAL_MAKE_CLEAN_FILES "${CLEAN_FILES}" + ) + +endif(GENERATE_DOC) diff --git a/docs/_build/conf.py.in b/docs/_build/conf.py.in new file mode 100644 index 0000000..9985c74 --- /dev/null +++ b/docs/_build/conf.py.in @@ -0,0 +1,218 @@ +# -*- coding: utf-8 -*- +# +# MySQL Data Dumper documentation build configuration file, created by +# sphinx-quickstart on Tue Apr 26 11:44:25 2011. +# +# This file is execfile()d with the current directory set to its containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys, os + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. 
+#sys.path.insert(0, os.path.abspath('.')) + +# -- General configuration ----------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +#needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = ['sphinx.ext.todo'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['@CMAKE_CURRENT_SOURCE_DIR@/_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'@PROJECT_NAME@' +copyright = u'2011, Andrew Hutchings' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = '@VERSION@' +# The full version, including alpha/beta/rc tags. +release = '@VERSION@' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ['_build'] + +# The reST default role (used for this markup: `text`) to use for all documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). 
+#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + + +# -- Options for HTML output --------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = 'default' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['@CMAKE_CURRENT_SOURCE_DIR@/_static'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. 
+#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_domain_indices = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +#html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +#html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = None + +# Output file base name for HTML help builder. +htmlhelp_basename = 'MySQLDataDumperdoc' + + +# -- Options for LaTeX output -------------------------------------------------- + +# The paper size ('letter' or 'a4'). +#latex_paper_size = 'letter' + +# The font size ('10pt', '11pt' or '12pt'). +#latex_font_size = '10pt' + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass [howto/manual]). +latex_documents = [ + ('index', 'MySQLDataDumper.tex', u'@PROJECT_NAME@ Documentation', + u'Andrew Hutchings', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. 
+#latex_use_parts = False + +# If true, show page references after internal links. +#latex_show_pagerefs = False + +# If true, show URL addresses after external links. +#latex_show_urls = False + +# Additional stuff for the LaTeX preamble. +#latex_preamble = '' + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_domain_indices = True + + +# -- Options for manual page output -------------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ('mydumper_usage', 'mydumper', u'@PROGRAM_DESC@', + [u'Andrew Hutchings'], 1), + ('myloader_usage', 'myloader', u'@PROGRAM_DESC@', + [u'Andrew Hutchings'], 1) +] diff --git a/docs/_build/sources.cmake.in b/docs/_build/sources.cmake.in new file mode 100644 index 0000000..6986129 --- /dev/null +++ b/docs/_build/sources.cmake.in @@ -0,0 +1,16 @@ +# This script recursively copies all ReST documents from the source directory to +# the binary directory. CMAKE_CURRENT_SOURCE_DIR and SOURCES_DIR are substituted +# upon the cmake stage. The script is executed upon the make stage to ensure +# that the binary sources directory is always up to date. 
+ +file(GLOB SOURCES + RELATIVE "@CMAKE_CURRENT_SOURCE_DIR@" + "@CMAKE_CURRENT_SOURCE_DIR@/*.rst" +) +foreach(source ${SOURCES}) + configure_file( + "@CMAKE_CURRENT_SOURCE_DIR@/${source}" + "@SOURCES_DIR@/${source}" + COPYONLY + ) +endforeach(source) diff --git a/docs/authors.rst b/docs/authors.rst new file mode 100644 index 0000000..936b8b9 --- /dev/null +++ b/docs/authors.rst @@ -0,0 +1,9 @@ +Authors +======= + +The code for mydumper has been written by the following people: + + * `Domas Mituzas `_, Facebook ( domas at fb dot com ) + * `Andrew Hutchings `_, SkySQL ( andrew at skysql dot com ) + * `Mark Leith `_, Oracle Corporation ( mark dot leith at oracle dot com ) + * `Max Bubenick `_, Percona RDBA ( max dot bubenick at percona dot com ) diff --git a/docs/compiling.rst b/docs/compiling.rst new file mode 100644 index 0000000..6a0cc02 --- /dev/null +++ b/docs/compiling.rst @@ -0,0 +1,70 @@ +Compiling +========= + +Requirements +------------ + +mydumper requires the following before it can be compiled: + + * `CMake `_ + * `Glib2 `_ (with development packages) + * `PCRE `_ (with development packages) + * `MySQL `_ client libraries (with development packages) + +Additionally the following packages are optional: + + * `python-sphinx `_ (for documentation) + +Ubuntu/Debian +^^^^^^^^^^^^^ + +.. code-block:: bash + + apt-get install libglib2.0-dev libmysqlclient15-dev zlib1g-dev libpcre3-dev + +Fedora/Redhat/CentOS +^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: bash + + yum install glib2-devel mysql-devel zlib-devel pcre-devel + +OpenSUSE +^^^^^^^^ + +.. code-block:: bash + + zypper install glib2-devel libmysqlclient-devel pcre-devel zlib-devel + +Mac OSX +^^^^^^^ + +.. code-block:: bash + + port install glib2 mysql5 pcre + +CMake +----- + +CMake is used for mydumper's build system and is executed as follows:: + + cmake . 
+ make + +You can optionally provide parameters for CMake, the possible options are: + + * ``-DMYSQL_CONFIG=/path/to/mysql_config`` - The path and filename for the mysql_config executable + * ``-DCMAKE_INSTALL_PREFIX=/install/path`` - The path where mydumper should be installed + +Documentation +------------- + +If you wish to just compile the documentation you can do so with:: + + cmake . + make doc_html + +or for a man page output:: + + cmake . + make doc_man diff --git a/docs/examples.rst b/docs/examples.rst new file mode 100644 index 0000000..14f74ef --- /dev/null +++ b/docs/examples.rst @@ -0,0 +1,37 @@ +Examples +======== + +Simple Usage +------------ +Just running :program:`mydumper` without any options will try to connect to a +server using the default socket path. It will then dump the tables from all +databases using 4 worker threads. + +Regex +----- +To use :program:`mydumper`'s regex feature simply use the +:option:`--regex ` option. In the following example mydumper +will ignore the ``test`` and ``mysql`` databases:: + + mydumper --regex '^(?!(mysql|test))' + +Restoring a dump +---------------- +Mydumper now include myloader which is a multi-threaded restoration tool. To +use myloader with a mydumper dump you simply need to pass it the directory of +the dump along with a user capable of restoring the schemas and data. As an +example the following will restore a dump overwriting any existing tables:: + + myloader --directory=export-20110614-094953 --overwrite-tables --user=root + +Daemon mode +----------- +Mydumper has a daemon mode which will snapshot the dump data every so often +whilst continuously retreiving the binary log files. This gives a continuous +consistent backup right up to the point where the database server fails. To use +this you simply need to use the :option:`--daemon ` option. 
+ +In the following example mydumper will use daemon mode, creating a snapshot +every half an hour and log to an output file:: + + mydumper --daemon --snapshot-interval=30 --logfile=dump.log diff --git a/docs/files.rst b/docs/files.rst new file mode 100644 index 0000000..dd7a181 --- /dev/null +++ b/docs/files.rst @@ -0,0 +1,61 @@ +Output Files +============ + +mydumper generates several files during the generation of the dump. Many of +these are for the table data itself since every table has at least one file. + +Metadata +-------- +When a dump is executed a file called ``metadata.partial`` is created in the output +directory and is renamed to ``metadata`` when mydumper finish without error. +This contains the start and end time of the dump as well as the +master binary log positions if applicable. + +This is an example of the content of this file:: + + Started dump at: 2011-05-05 13:57:17 + SHOW MASTER STATUS: + Log: linuxjedi-laptop-bin.000001 + Pos: 106 + + Finished dump at: 2011-05-05 13:57:17 + +Table Data +---------- +The data from every table is written into a separate file, also if the +:option:`--rows ` option is used then each chunk of table will +be in a separate file. The file names for this are in the format:: + + database.table.sql(.gz) + +or if chunked:: + + database.table.chunk.sql(.gz) + +Where 'chunk' is a number padded with up to 5 zeros. + +Table Schemas +------------- +When the :option:`--schemas ` option is used mydumper will +create a file for the schema of every table it is writing data for. The files +for this are in the following format:: + + database.table-schema.sql(.gz) + +Binary Logs +----------- +Binary logs are retrieved when :option:`--binlogs ` option +has been set. This will store them in the ``binlog_snapshot/`` sub-directory +inside the dump directory. + +The binary log files have the same filename as the MySQL server that supplies them and will also have a .gz on the end if they are compressed. 
+ +Daemon mode +----------- +Daemon mode does things a little differently. There are the directories ``0`` +and ``1`` inside the dump directory. These alternate when dumping so that if +mydumper fails for any reason there is still a good snapshot. When a snapshot +dump is complete the ``last_dump`` symlink is updated to point to that dump. + +If binary logging is enabled mydumper will connect as if it is a slave server +and constantly retreives the binary logs into the ``binlogs`` subdirectory. diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 0000000..fa23211 --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,25 @@ +.. MySQL Data Dumper documentation master file + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to MySQL Data Dumper's documentation! +============================================= + +Contents: + +.. toctree:: + :maxdepth: 2 + + authors + compiling + mydumper_usage + myloader_usage + files + examples + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`search` + diff --git a/docs/mydumper_usage.rst b/docs/mydumper_usage.rst new file mode 100644 index 0000000..fa35287 --- /dev/null +++ b/docs/mydumper_usage.rst @@ -0,0 +1,193 @@ +Mydumper Usage +============== + +Synopsis +-------- + +:program:`mydumper` [:ref:`OPTIONS `] + +Description +----------- + +:program:`mydumper` is a tool used for backing up MySQL database servers much +faster than the mysqldump tool distributed with MySQL. It also has the +capability to retrieve the binary logs from the remote server at the same time +as the dump itself. 
The advantages of mydumper are: + + * Parallelism (hence, speed) and performance (avoids expensive character set conversion routines, efficient code overall) + * Easier to manage output (separate files for tables, dump metadata, etc, easy to view/parse data) + * Consistency - maintains snapshot across all threads, provides accurate master and slave log positions, etc + * Manageability - supports PCRE for specifying database and tables inclusions and exclusions + +.. _mydumper-options-label: + +Options +------- + +The :program:`mydumper` tool has several available options: + +.. program:: mydumper + +.. option:: --help, -? + + Show help text + +.. option:: --host, -h + + Hostname of MySQL server to connect to (default localhost) + +.. option:: --user, -u + + MySQL username with the correct privileges to execute the dump + +.. option:: --password, -p + + The corresponding password for the MySQL user + +.. option:: --port, -P + + The port for the MySQL connection. + + .. note:: + + For localhost TCP connections use 127.0.0.1 for :option:`--host`. + +.. option:: --socket, -S + + The UNIX domain socket file to use for the connection + +.. option:: --database, -B + + Database to dump + +.. option:: --tables-list, -T + + A comma separated list of tables to dump + +.. option:: --threads, -t + + The number of threads to use for dumping data, default is 4 + + .. note:: + + Other threads are used in mydumper, this option does not control these + +.. option:: --outputdir, -o + + Output directory name, default is export-YYYYMMDD-HHMMSS + +.. option:: --statement-size, -s + + The maximum size for an insert statement before breaking into a new + statement, default 1,000,000 bytes + +.. option:: --rows, -r + + Split table into chunks of this many rows, default unlimited + +.. option:: --compress, -c + + Compress the output files + +.. option:: --compress-input, -C + + Use client protocol compression for connections to the MySQL server + +.. 
option:: --build-empty-files, -e + + Create empty dump files if there is no data to dump + +.. option:: --regex, -x + + A regular expression to match against database and table + +.. option:: --ignore-engines, -i + + Comma separated list of storage engines to ignore + +.. option:: --no-schemas, -m + + Do not dump schemas with the data + +.. option:: --no-data, -d + + Do not dump table data + +.. option:: --triggers, -G + + Dump triggers + +.. option:: --events, -E + + Dump events + +.. option:: --routines, -R + + Dump stored procedures and functions + +.. option:: --long-query-guard, -l + + Timeout for long query execution in seconds, default 60 + +.. option:: --kill-long-queries, -K + + Kill long running queries instead of aborting the dump + +.. option:: --version, -V + + Show the program version and exit + +.. option:: --verbose, -v + + The verbosity of messages. 0 = silent, 1 = errors, 2 = warnings, 3 = info. + Default is 2. + +.. option:: --binlogs, -b + + Get the binlogs from the server as well as the dump files (You need to compile with -DWITH_BINLOG=ON) + +.. option:: --daemon, -D + + Enable daemon mode + +.. option:: --snapshot-interval, -I + + Interval between each dump snapshot (in minutes), requires + :option:`--daemon`, default 60 (minutes) + +.. option:: --logfile, -L + + A file to log mydumper output to instead of console output. Useful for + daemon mode. + +.. option:: --no-locks, -k + + Do not execute the temporary shared read lock. + + .. warning:: + + This will cause inconsistent backups. + +.. option:: --[skip-]tz-utc + + SET TIME_ZONE='+00:00' at top of dump to allow dumping of TIMESTAMP data + when a server has data in different time zones or data is being moved + between servers with different time zones, defaults to on use --skip-tz-utc + to disable. + +.. option:: --less-locking + + Minimize locking time on InnoDB tables grabbing a LOCK TABLE ... READ + on all non-innodb tables. + +.. 
option:: --chunk-filesize -F + + Split tables into chunks of this output file size. This value is in MB + +.. option:: --success-on-1146 + + Not increment error count and Warning instead of Critical in case of table doesn't exist + +.. option:: --use-savepoints + + Use savepoints to reduce metadata locking issues, needs SUPER privilege diff --git a/docs/myloader_usage.rst b/docs/myloader_usage.rst new file mode 100644 index 0000000..286eedd --- /dev/null +++ b/docs/myloader_usage.rst @@ -0,0 +1,99 @@ +Myloader Usage +============== + +Synopsis +-------- + +:program:`myloader` :option:`--directory ` = /path/to/mydumper/backup [:ref:`OPTIONS `] + +Description +----------- + +:program:`myloader` is a tool used for multi-threaded restoration of mydumper +backups. + +.. _myloader-options-label: + +Options +------- + +The :program:`myloader` tool has several available options: + +.. program:: myloader + +.. option:: --help, -? + + Show help text + +.. option:: --host, -h + + Hostname of MySQL server to connect to (default localhost) + +.. option:: --user, -u + + MySQL username with the correct privileges to execute the restoration + +.. option:: --password, -p + + The corresponding password for the MySQL user + +.. option:: --port, -P + + The port for the MySQL connection. + + .. note:: + + For localhost TCP connections use 127.0.0.1 for :option:`--host`. + +.. option:: --socket, -S + + The UNIX domain socket file to use for the connection + +.. option:: --threads, -t + + The number of threads to use for restoring data, default is 4 + +.. option:: --version, -V + + Show the program version and exit + +.. option:: --compress-protocol, -C + + Use client protocol compression for connections to the MySQL server + +.. option:: --directory, -d + + The directory of the mydumper backup to restore + +.. option:: --database, -B + + An alternative database to load the dump into + + .. note:: + + For use with single database dumps. 
When using with multi-database dumps + that have duplicate table names in more than one database it may cause + errors. Alternatively this scenario may give unpredictable results with + :option:`--overwrite-tables`. + +.. option:: --source-db, -s + + Database to restore, useful in combination with --database + +.. option:: --queries-per-transaction, -q + + Number of INSERT queries to execute per transaction during restore, default + is 1000. + +.. option:: --overwrite-tables, -o + + Drop any existing tables when restoring schemas + +.. option:: --enable-binlog, -e + + Log the data loading in the MySQL binary log if enabled (off by default) + +.. option:: --verbose, -v + + The verbosity of messages. 0 = silent, 1 = errors, 2 = warnings, 3 = info. + Default is 2. diff --git a/g_unix_signal.c b/g_unix_signal.c new file mode 100644 index 0000000..557fb15 --- /dev/null +++ b/g_unix_signal.c @@ -0,0 +1,128 @@ +#define _POSIX_SOURCE +#include +#include + +static GPtrArray *signal_data = NULL; + +typedef struct _GUnixSignalData { + guint source_id; + GMainContext *context; + gboolean triggered; + gint signum; +} GUnixSignalData; + +typedef struct _GUnixSignalSource { + GSource source; + GUnixSignalData *data; +} GUnixSignalSource; + +static inline GUnixSignalData* get_signal_data(guint index) +{ + return (GUnixSignalData*)g_ptr_array_index(signal_data, index); +} + +static void handler(gint signum) { + g_assert(signal_data != NULL); + guint i; + for (i = 0; i < signal_data->len; ++i) + if (get_signal_data(i)->signum == signum) + get_signal_data(i)->triggered = TRUE; + + struct sigaction action; + action.sa_handler= handler; + sigemptyset (&action.sa_mask); + action.sa_flags = 0; + sigaction(signum, &action, NULL); +} + +static gboolean check(GSource *source) +{ + GUnixSignalSource *signal_source = (GUnixSignalSource*) source; + return signal_source->data->triggered; +} + +static gboolean prepare(GSource *source, gint *timeout_) +{ + GUnixSignalSource *signal_source = 
(GUnixSignalSource*) source; + if (signal_source->data->context == NULL) { + g_main_context_ref(signal_source->data->context = g_source_get_context(source)); + signal_source->data->source_id = g_source_get_id(source); + } + + *timeout_ = -1; + return signal_source->data->triggered; +} + +static gboolean dispatch(GSource *source, GSourceFunc callback, gpointer user_data) +{ + GUnixSignalSource *signal_source = (GUnixSignalSource*) source; + signal_source->data->triggered = FALSE; + return callback(user_data) ? TRUE : FALSE; +} +static void finalize(GSource *source) +{ + GUnixSignalSource *signal_source = (GUnixSignalSource*) source; + + struct sigaction action; + action.sa_handler= NULL; + sigemptyset (&action.sa_mask); + action.sa_flags = 0; + + sigaction(signal_source->data->signum, &action, NULL); + g_main_context_unref(signal_source->data->context); + g_ptr_array_remove_fast(signal_data, signal_source->data); + if (signal_data->len == 0) + signal_data = (GPtrArray*) g_ptr_array_free(signal_data, TRUE); + g_free(signal_source->data); + +} +static GSourceFuncs SourceFuncs = +{ + .prepare = prepare, + .check = check, + .dispatch = dispatch, + .finalize = finalize, + .closure_callback = NULL, .closure_marshal = NULL +}; + +static void g_unix_signal_source_init(GSource *source, gint signum) +{ + GUnixSignalSource *signal_source = (GUnixSignalSource *) source; + signal_source->data = g_new(GUnixSignalData, 1); + signal_source->data->triggered = FALSE; + signal_source->data->signum = signum; + signal_source->data->context = NULL; + + if (signal_data == NULL) + signal_data = g_ptr_array_new(); + g_ptr_array_add(signal_data, signal_source->data); +} + +GSource *g_unix_signal_source_new(gint signum) +{ + GSource *source = g_source_new(&SourceFuncs, sizeof(GUnixSignalSource)); + g_unix_signal_source_init(source, signum); + struct sigaction action; + action.sa_handler= handler; + sigemptyset (&action.sa_mask); + action.sa_flags = 0; + sigaction(signum, &action, NULL); + 
return source; +} + +guint g_unix_signal_add_full(gint priority, gint signum, GSourceFunc function, gpointer data, GDestroyNotify notify) +{ + g_return_val_if_fail(function != NULL, 0); + GSource *source = g_unix_signal_source_new(signum); + if (priority != G_PRIORITY_DEFAULT) + g_source_set_priority (source, priority); + g_source_set_callback(source, function, data, notify); + guint id = g_source_attach(source, NULL); + g_source_unref(source); + return id; +} + +guint g_unix_signal_add(gint signum, GSourceFunc function, gpointer data) +{ + return g_unix_signal_add_full(G_PRIORITY_DEFAULT, signum, function, data, NULL); +} diff --git a/g_unix_signal.h b/g_unix_signal.h new file mode 100644 index 0000000..f9d6f8b --- /dev/null +++ b/g_unix_signal.h @@ -0,0 +1,10 @@ +#ifndef G_UNIX_SIGNAL_H +#define G_UNIX_SIGNAL_H + +#include + +GSource *g_unix_signal_source_new(gint signum); +guint g_unix_signal_add(gint signum, GSourceFunc function, gpointer data); +guint g_unix_signal_add_full(gint priority, gint signum, GSourceFunc function, gpointer data, GDestroyNotify notify); + +#endif /* G_UNIX_SIGNAL_H */ diff --git a/mydumper.c b/mydumper.c new file mode 100644 index 0000000..7d282d9 --- /dev/null +++ b/mydumper.c @@ -0,0 +1,2890 @@ +/* + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . 
+ + Authors: Domas Mituzas, Facebook ( domas at fb dot com ) + Mark Leith, Oracle Corporation (mark dot leith at oracle dot com) + Andrew Hutchings, SkySQL (andrew at skysql dot com) + Max Bubenick, Percona RDBA (max dot bubenick at percona dot com) +*/ + +#define _LARGEFILE64_SOURCE +#define _FILE_OFFSET_BITS 64 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "config.h" +#ifdef WITH_BINLOG +#include "binlog.h" +#else +#include "mydumper.h" +#endif +#include "server_detect.h" +#include "common.h" +#include "g_unix_signal.h" +#include + +char *regexstring=NULL; + +const char DIRECTORY[]= "export"; +#ifdef WITH_BINLOG +const char BINLOG_DIRECTORY[]= "binlog_snapshot"; +const char DAEMON_BINLOGS[]= "binlogs"; +#endif + +static GMutex * init_mutex = NULL; + +/* Program options */ +gchar *output_directory= NULL; +guint statement_size= 1000000; +guint rows_per_file= 0; +guint chunk_filesize = 0; +int longquery= 60; +int build_empty_files= 0; +int skip_tz= 0; +int need_dummy_read= 0; +int need_dummy_toku_read = 0; +int compress_output= 0; +int killqueries= 0; +int detected_server= 0; +int lock_all_tables=0; +guint snapshot_interval= 60; +gboolean daemon_mode= FALSE; +gboolean have_snapshot_cloning= FALSE; + +gchar *ignore_engines= NULL; +char **ignore= NULL; + +gchar *tables_list= NULL; +char **tables= NULL; +GList *no_updated_tables=NULL; + +#ifdef WITH_BINLOG +gboolean need_binlogs= FALSE; +gchar *binlog_directory= NULL; +gchar *daemon_binlog_directory= NULL; +#endif + +gchar *logfile= NULL; +FILE *logoutfile= NULL; + +gboolean no_schemas= FALSE; +gboolean no_data= FALSE; +gboolean no_locks= FALSE; +gboolean dump_triggers= FALSE; +gboolean dump_events= FALSE; +gboolean dump_routines= FALSE; +gboolean less_locking = FALSE; +gboolean use_savepoints = FALSE; +gboolean success_on_1146 = FALSE; + +GList *innodb_tables= NULL; +GList *non_innodb_table= NULL; +GList *table_schemas= 
NULL; +GList *view_schemas= NULL; +GList *schema_post= NULL; +gint non_innodb_table_counter= 0; +gint non_innodb_done= 0; +guint less_locking_threads = 0; +guint updated_since = 0; +guint trx_consistency_only = 0; + +// For daemon mode, 0 or 1 +guint dump_number= 0; +guint binlog_connect_id= 0; +gboolean shutdown_triggered= FALSE; +GAsyncQueue *start_scheduled_dump; +GMainLoop *m1; +static GCond * ll_cond = NULL; +static GMutex * ll_mutex = NULL; + +int errors; + +static GOptionEntry entries[] = +{ + { "database", 'B', 0, G_OPTION_ARG_STRING, &db, "Database to dump", NULL }, + { "tables-list", 'T', 0, G_OPTION_ARG_STRING, &tables_list, "Comma delimited table list to dump (does not exclude regex option)", NULL }, + { "outputdir", 'o', 0, G_OPTION_ARG_FILENAME, &output_directory, "Directory to output files to", NULL }, + { "statement-size", 's', 0, G_OPTION_ARG_INT, &statement_size, "Attempted size of INSERT statement in bytes, default 1000000", NULL}, + { "rows", 'r', 0, G_OPTION_ARG_INT, &rows_per_file, "Try to split tables into chunks of this many rows. This option turns off --chunk-filesize", NULL}, + { "chunk-filesize", 'F', 0, G_OPTION_ARG_INT, &chunk_filesize, "Split tables into chunks of this output file size. 
This value is in MB", NULL }, + { "compress", 'c', 0, G_OPTION_ARG_NONE, &compress_output, "Compress output files", NULL}, + { "build-empty-files", 'e', 0, G_OPTION_ARG_NONE, &build_empty_files, "Build dump files even if no data available from table", NULL}, + { "regex", 'x', 0, G_OPTION_ARG_STRING, ®exstring, "Regular expression for 'db.table' matching", NULL}, + { "ignore-engines", 'i', 0, G_OPTION_ARG_STRING, &ignore_engines, "Comma delimited list of storage engines to ignore", NULL }, + { "no-schemas", 'm', 0, G_OPTION_ARG_NONE, &no_schemas, "Do not dump table schemas with the data", NULL }, + { "no-data", 'd', 0, G_OPTION_ARG_NONE, &no_data, "Do not dump table data", NULL }, + { "triggers", 'G', 0, G_OPTION_ARG_NONE, &dump_triggers, "Dump triggers", NULL }, + { "events", 'E', 0, G_OPTION_ARG_NONE, &dump_events, "Dump events", NULL }, + { "routines", 'R', 0, G_OPTION_ARG_NONE, &dump_routines, "Dump stored procedures and functions", NULL }, + { "no-locks", 'k', 0, G_OPTION_ARG_NONE, &no_locks, "Do not execute the temporary shared read lock. 
WARNING: This will cause inconsistent backups", NULL }, + { "less-locking", 0, 0, G_OPTION_ARG_NONE, &less_locking, "Minimize locking time on InnoDB tables.", NULL}, + { "long-query-guard", 'l', 0, G_OPTION_ARG_INT, &longquery, "Set long query timer in seconds, default 60", NULL }, + { "kill-long-queries", 'K', 0, G_OPTION_ARG_NONE, &killqueries, "Kill long running queries (instead of aborting)", NULL }, +#ifdef WITH_BINLOG + { "binlogs", 'b', 0, G_OPTION_ARG_NONE, &need_binlogs, "Get a snapshot of the binary logs as well as dump data", NULL }, +#endif + { "daemon", 'D', 0, G_OPTION_ARG_NONE, &daemon_mode, "Enable daemon mode", NULL }, + { "snapshot-interval", 'I', 0, G_OPTION_ARG_INT, &snapshot_interval, "Interval between each dump snapshot (in minutes), requires --daemon, default 60", NULL }, + { "logfile", 'L', 0, G_OPTION_ARG_FILENAME, &logfile, "Log file name to use, by default stdout is used", NULL }, + { "tz-utc", 0, 0, G_OPTION_ARG_NONE, NULL, "SET TIME_ZONE='+00:00' at top of dump to allow dumping of TIMESTAMP data when a server has data in different time zones or data is being moved between servers with different time zones, defaults to on use --skip-tz-utc to disable.", NULL }, + { "skip-tz-utc", 0, 0, G_OPTION_ARG_NONE, &skip_tz, "", NULL }, + { "use-savepoints", 0, 0, G_OPTION_ARG_NONE, &use_savepoints, "Use savepoints to reduce metadata locking issues, needs SUPER privilege", NULL }, + { "success-on-1146", 0, 0, G_OPTION_ARG_NONE, &success_on_1146, "Not increment error count and Warning instead of Critical in case of table doesn't exist", NULL}, + { "lock-all-tables", 0, 0, G_OPTION_ARG_NONE, &lock_all_tables, "Use LOCK TABLE for all, instead of FTWRL", NULL}, + { "updated-since", 'U', 0, G_OPTION_ARG_INT, &updated_since, "Use Update_time to dump only tables updated in the last U days", NULL}, + { "trx-consistency-only", 0, 0, G_OPTION_ARG_NONE, &trx_consistency_only, "Transactional consistency only", NULL}, + { NULL, 0, 0, G_OPTION_ARG_NONE, NULL, 
NULL, NULL } +}; + +struct tm tval; + +void dump_schema_data(MYSQL *conn, char *database, char *table, char *filename); +void dump_triggers_data(MYSQL *conn, char *database, char *table, char *filename); +void dump_view_data(MYSQL *conn, char *database, char *table, char *filename, char *filename2); +void dump_schema(MYSQL *conn, char *database, char *table, struct configuration *conf); +void dump_view(char *database, char *table, struct configuration *conf); +void dump_table(MYSQL *conn, char *database, char *table, struct configuration *conf, gboolean is_innodb); +void dump_tables(MYSQL *, GList *, struct configuration *); +void dump_schema_post(char *database, struct configuration *conf); +void restore_charset(GString* statement); +void set_charset(GString* statement, char *character_set, char *collation_connection); +void dump_schema_post_data(MYSQL *conn, char *database, char *filename); +guint64 dump_table_data(MYSQL *, FILE *, char *, char *, char *, char *); +void dump_database(MYSQL *, char *, FILE *, struct configuration *); +void dump_create_database(MYSQL *conn, char *database); +void get_tables(MYSQL * conn, struct configuration *); +void get_not_updated(MYSQL *conn); +GList * get_chunks_for_table(MYSQL *, char *, char*, struct configuration *conf); +guint64 estimate_count(MYSQL *conn, char *database, char *table, char *field, char *from, char *to); +void dump_table_data_file(MYSQL *conn, char *database, char *table, char *where, char *filename); +void create_backup_dir(char *directory); +gboolean write_data(FILE *,GString*); +gboolean check_regex(char *database, char *table); +void no_log(const gchar *log_domain, GLogLevelFlags log_level, const gchar *message, gpointer user_data); +void set_verbose(guint verbosity); +#ifdef WITH_BINLOG +MYSQL *reconnect_for_binlog(MYSQL *thrconn); +void *binlog_thread(void *data); +#endif +void start_dump(MYSQL *conn); +MYSQL *create_main_connection(); +void *exec_thread(void *data); +void write_log_file(const gchar 
*log_domain, GLogLevelFlags log_level, const gchar *message, gpointer user_data); + +void no_log(const gchar *log_domain, GLogLevelFlags log_level, const gchar *message, gpointer user_data) { + (void) log_domain; + (void) log_level; + (void) message; + (void) user_data; +} + +void set_verbose(guint verbosity) { + if (logfile) { + logoutfile = g_fopen(logfile, "w"); + if (!logoutfile) { + g_critical("Could not open log file '%s' for writing: %d", logfile, errno); + exit(EXIT_FAILURE); + } + } + + switch (verbosity) { + case 0: + g_log_set_handler(NULL, (GLogLevelFlags)(G_LOG_LEVEL_MASK), no_log, NULL); + break; + case 1: + g_log_set_handler(NULL, (GLogLevelFlags)(G_LOG_LEVEL_WARNING | G_LOG_LEVEL_MESSAGE), no_log, NULL); + if (logfile) + g_log_set_handler(NULL, (GLogLevelFlags)(G_LOG_LEVEL_ERROR | G_LOG_LEVEL_CRITICAL), write_log_file, NULL); + break; + case 2: + g_log_set_handler(NULL, (GLogLevelFlags)(G_LOG_LEVEL_MESSAGE), no_log, NULL); + if (logfile) + g_log_set_handler(NULL, (GLogLevelFlags)(G_LOG_LEVEL_WARNING | G_LOG_LEVEL_ERROR | G_LOG_LEVEL_WARNING | G_LOG_LEVEL_ERROR | G_LOG_LEVEL_CRITICAL), write_log_file, NULL); + break; + default: + if (logfile) + g_log_set_handler(NULL, (GLogLevelFlags)(G_LOG_LEVEL_MASK), write_log_file, NULL); + break; + } +} + +gboolean sig_triggered(gpointer user_data) { + (void) user_data; + + g_message("Shutting down gracefully"); + shutdown_triggered= TRUE; + g_main_loop_quit(m1); + return FALSE; +} + +void clear_dump_directory() +{ + GError *error= NULL; + char* dump_directory= g_strdup_printf("%s/%d", output_directory, dump_number); + GDir* dir= g_dir_open(dump_directory, 0, &error); + + if (error) { + g_critical("cannot open directory %s, %s\n", dump_directory, error->message); + errors++; + return; + } + + const gchar* filename= NULL; + + while((filename= g_dir_read_name(dir))) { + gchar* path= g_build_filename(dump_directory, filename, NULL); + if (g_unlink(path) == -1) { + g_critical("error removing file %s (%d)\n", path, 
errno); + errors++; + return; + } + g_free(path); + } + + g_dir_close(dir); + g_free(dump_directory); +} + +gboolean run_snapshot(gpointer *data) +{ + (void) data; + + g_async_queue_push(start_scheduled_dump,GINT_TO_POINTER(1)); + + return (shutdown_triggered) ? FALSE : TRUE; +} + +/* Check database.table string against regular expression */ + +gboolean check_regex(char *database, char *table) { + /* This is not going to be used in threads */ + static pcre *re = NULL; + int rc; + int ovector[9]= {0}; + const char *error; + int erroroffset; + + char *p; + + /* Let's compile the RE before we do anything */ + if (!re) { + re = pcre_compile(regexstring,PCRE_CASELESS|PCRE_MULTILINE,&error,&erroroffset,NULL); + if(!re) { + g_critical("Regular expression fail: %s", error); + exit(EXIT_FAILURE); + } + } + + p=g_strdup_printf("%s.%s",database,table); + rc = pcre_exec(re,NULL,p,strlen(p),0,0,ovector,9); + g_free(p); + + return (rc>0)?TRUE:FALSE; +} + +/* Write some stuff we know about snapshot, before it changes */ +void write_snapshot_info(MYSQL *conn, FILE *file) { + MYSQL_RES *master=NULL, *slave=NULL, *mdb=NULL; + MYSQL_FIELD *fields; + MYSQL_ROW row; + + char *masterlog=NULL; + char *masterpos=NULL; + char *mastergtid=NULL; + + char *connname=NULL; + char *slavehost=NULL; + char *slavelog=NULL; + char *slavepos=NULL; + char *slavegtid=NULL; + guint isms; + guint i; + + mysql_query(conn,"SHOW MASTER STATUS"); + master=mysql_store_result(conn); + if (master && (row=mysql_fetch_row(master))) { + masterlog=row[0]; + masterpos=row[1]; + /* Oracle/Percona GTID */ + if(mysql_num_fields(master) == 5) { + mastergtid=row[4]; + } else { + /* Let's try with MariaDB 10.x */ + mysql_query(conn, "SELECT @@gtid_current_pos"); + mdb=mysql_store_result(conn); + if (mdb && (row=mysql_fetch_row(mdb))) { + mastergtid=row[0]; + } + } + } + + if (masterlog) { + fprintf(file, "SHOW MASTER STATUS:\n\tLog: %s\n\tPos: %s\n\tGTID:%s\n\n", masterlog, masterpos,mastergtid); + g_message("Written 
master status"); + } + + isms = 0; + mysql_query(conn,"SELECT @@default_master_connection"); + MYSQL_RES *rest = mysql_store_result(conn); + if(rest != NULL && mysql_num_rows(rest)){ + mysql_free_result(rest); + g_message("Multisource slave detected."); + isms = 1; + } + + if (isms) + mysql_query(conn, "SHOW ALL SLAVES STATUS"); + else + mysql_query(conn, "SHOW SLAVE STATUS"); + + slave=mysql_store_result(conn); + while (slave && (row=mysql_fetch_row(slave))) { + fields=mysql_fetch_fields(slave); + for (i=0; iconf; + // mysql_init is not thread safe, especially in Connector/C + g_mutex_lock(init_mutex); + MYSQL *thrconn = mysql_init(NULL); + g_mutex_unlock(init_mutex); + + mysql_options(thrconn,MYSQL_READ_DEFAULT_GROUP,"mydumper"); + + if (compress_protocol) + mysql_options(thrconn,MYSQL_OPT_COMPRESS,NULL); + + if (!mysql_real_connect(thrconn, hostname, username, password, NULL, port, socket_path, 0)) { + g_critical("Failed to connect to database: %s", mysql_error(thrconn)); + exit(EXIT_FAILURE); + } else { + g_message("Thread %d connected using MySQL connection ID %lu", td->thread_id, mysql_thread_id(thrconn)); + } + + if(use_savepoints && mysql_query(thrconn, "SET SQL_LOG_BIN = 0")){ + g_critical("Failed to disable binlog for the thread: %s",mysql_error(thrconn)); + exit(EXIT_FAILURE); + } + if ((detected_server == SERVER_TYPE_MYSQL) && mysql_query(thrconn, "SET SESSION wait_timeout = 2147483")){ + g_warning("Failed to increase wait_timeout: %s", mysql_error(thrconn)); + } + if (mysql_query(thrconn, "SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ")) { + g_warning("Failed to set isolation level: %s", mysql_error(thrconn)); + } + if (mysql_query(thrconn, "START TRANSACTION /*!40108 WITH CONSISTENT SNAPSHOT */")) { + g_critical("Failed to start consistent snapshot: %s",mysql_error(thrconn)); + errors++; + } + if(!skip_tz && mysql_query(thrconn, "/*!40103 SET TIME_ZONE='+00:00' */")){ + g_critical("Failed to set time zone: %s",mysql_error(thrconn)); + } + + 
/* Unfortunately version before 4.1.8 did not support consistent snapshot transaction starts, so we cheat */ + if (need_dummy_read) { + mysql_query(thrconn,"SELECT /*!40001 SQL_NO_CACHE */ * FROM mysql.mydumperdummy"); + MYSQL_RES *res=mysql_store_result(thrconn); + if (res) + mysql_free_result(res); + } + if(need_dummy_toku_read){ + mysql_query(thrconn,"SELECT /*!40001 SQL_NO_CACHE */ * FROM mysql.tokudbdummy"); + MYSQL_RES *res=mysql_store_result(thrconn); + if (res) + mysql_free_result(res); + } + mysql_query(thrconn, "/*!40101 SET NAMES binary*/"); + + g_async_queue_push(conf->ready,GINT_TO_POINTER(1)); + + struct job* job= NULL; + struct table_job* tj= NULL; + struct schema_job* sj= NULL; + struct view_job* vj= NULL; + struct schema_post_job* sp= NULL; + #ifdef WITH_BINLOG + struct binlog_job* bj= NULL; + #endif + /* if less locking we need to wait until that threads finish + progressively waking up these threads */ + if(less_locking){ + g_mutex_lock(ll_mutex); + + while (less_locking_threads >= td->thread_id) { + g_cond_wait (ll_cond, ll_mutex); + } + + g_mutex_unlock(ll_mutex); + } + + for(;;) { + + GTimeVal tv; + g_get_current_time(&tv); + g_time_val_add(&tv,1000*1000*1); + job=(struct job *)g_async_queue_pop(conf->queue); + if (shutdown_triggered && (job->type != JOB_SHUTDOWN)) { + continue; + } + + switch (job->type) { + case JOB_DUMP: + tj=(struct table_job *)job->job_data; + if (tj->where) + g_message("Thread %d dumping data for `%s`.`%s` where %s", td->thread_id, tj->database, tj->table, tj->where); + else + g_message("Thread %d dumping data for `%s`.`%s`", td->thread_id, tj->database, tj->table); + if(use_savepoints && mysql_query(thrconn, "SAVEPOINT mydumper")){ + g_critical("Savepoint failed: %s",mysql_error(thrconn)); + } + dump_table_data_file(thrconn, tj->database, tj->table, tj->where, tj->filename); + if(use_savepoints && mysql_query(thrconn, "ROLLBACK TO SAVEPOINT mydumper")){ + g_critical("Rollback to savepoint failed: 
%s",mysql_error(thrconn)); + } + if(tj->database) g_free(tj->database); + if(tj->table) g_free(tj->table); + if(tj->where) g_free(tj->where); + if(tj->filename) g_free(tj->filename); + g_free(tj); + g_free(job); + break; + case JOB_DUMP_NON_INNODB: + tj=(struct table_job *)job->job_data; + if (tj->where) + g_message("Thread %d dumping data for `%s`.`%s` where %s", td->thread_id, tj->database, tj->table, tj->where); + else + g_message("Thread %d dumping data for `%s`.`%s`", td->thread_id, tj->database, tj->table); + if(use_savepoints && mysql_query(thrconn, "SAVEPOINT mydumper")){ + g_critical("Savepoint failed: %s",mysql_error(thrconn)); + } + dump_table_data_file(thrconn, tj->database, tj->table, tj->where, tj->filename); + if(use_savepoints && mysql_query(thrconn, "ROLLBACK TO SAVEPOINT mydumper")){ + g_critical("Rollback to savepoint failed: %s",mysql_error(thrconn)); + } + if(tj->database) g_free(tj->database); + if(tj->table) g_free(tj->table); + if(tj->where) g_free(tj->where); + if(tj->filename) g_free(tj->filename); + g_free(tj); + g_free(job); + if (g_atomic_int_dec_and_test(&non_innodb_table_counter) && g_atomic_int_get(&non_innodb_done)) { + g_async_queue_push(conf->unlock_tables, GINT_TO_POINTER(1)); + } + break; + case JOB_SCHEMA: + sj=(struct schema_job *)job->job_data; + g_message("Thread %d dumping schema for `%s`.`%s`", td->thread_id, sj->database, sj->table); + dump_schema_data(thrconn, sj->database, sj->table, sj->filename); + if(sj->database) g_free(sj->database); + if(sj->table) g_free(sj->table); + if(sj->filename) g_free(sj->filename); + g_free(sj); + g_free(job); + break; + case JOB_VIEW: + vj=(struct view_job *)job->job_data; + g_message("Thread %d dumping view for `%s`.`%s`", td->thread_id, vj->database, vj->table); + dump_view_data(thrconn, vj->database, vj->table, vj->filename, vj->filename2); + if(vj->database) g_free(vj->database); + if(vj->table) g_free(vj->table); + if(vj->filename) g_free(vj->filename); + if(vj->filename2) 
g_free(vj->filename2); + g_free(vj); + g_free(job); + break; + case JOB_TRIGGERS: + sj=(struct schema_job *)job->job_data; + g_message("Thread %d dumping triggers for `%s`.`%s`", td->thread_id, sj->database, sj->table); + dump_triggers_data(thrconn, sj->database, sj->table, sj->filename); + if(sj->database) g_free(sj->database); + if(sj->table) g_free(sj->table); + if(sj->filename) g_free(sj->filename); + g_free(sj); + g_free(job); + break; + case JOB_SCHEMA_POST: + sp=(struct schema_post_job *)job->job_data; + g_message("Thread %d dumping SP and VIEWs for `%s`", td->thread_id, sp->database); + dump_schema_post_data(thrconn, sp->database, sp->filename); + if(sp->database) g_free(sp->database); + if(sp->filename) g_free(sp->filename); + g_free(sp); + g_free(job); + break; + #ifdef WITH_BINLOG + case JOB_BINLOG: + thrconn= reconnect_for_binlog(thrconn); + g_message("Thread %d connected using MySQL connection ID %lu (in binlog mode)", td->thread_id, mysql_thread_id(thrconn)); + bj=(struct binlog_job *)job->job_data; + g_message("Thread %d dumping binary log file %s", td->thread_id, bj->filename); + get_binlog_file(thrconn, bj->filename, binlog_directory, bj->start_position, bj->stop_position, FALSE); + if(bj->filename) + g_free(bj->filename); + g_free(bj); + g_free(job); + break; + #endif + case JOB_SHUTDOWN: + g_message("Thread %d shutting down", td->thread_id); + if (thrconn) + mysql_close(thrconn); + g_free(job); + mysql_thread_end(); + return NULL; + break; + default: + g_critical("Something very bad happened!"); + exit(EXIT_FAILURE); + } + } + if (thrconn) + mysql_close(thrconn); + mysql_thread_end(); + return NULL; +} + +void *process_queue_less_locking(struct thread_data *td) { + struct configuration *conf= td->conf; + // mysql_init is not thread safe, especially in Connector/C + g_mutex_lock(init_mutex); + MYSQL *thrconn = mysql_init(NULL); + g_mutex_unlock(init_mutex); + + mysql_options(thrconn,MYSQL_READ_DEFAULT_GROUP,"mydumper"); + + if (compress_protocol) 
+ mysql_options(thrconn,MYSQL_OPT_COMPRESS,NULL); + + if (!mysql_real_connect(thrconn, hostname, username, password, NULL, port, socket_path, 0)) { + g_critical("Failed to connect to database: %s", mysql_error(thrconn)); + exit(EXIT_FAILURE); + } else { + g_message("Thread %d connected using MySQL connection ID %lu", td->thread_id, mysql_thread_id(thrconn)); + } + + if ((detected_server == SERVER_TYPE_MYSQL) && mysql_query(thrconn, "SET SESSION wait_timeout = 2147483")){ + g_warning("Failed to increase wait_timeout: %s", mysql_error(thrconn)); + } + if(!skip_tz && mysql_query(thrconn, "/*!40103 SET TIME_ZONE='+00:00' */")){ + g_critical("Failed to set time zone: %s",mysql_error(thrconn)); + } + mysql_query(thrconn, "/*!40101 SET NAMES binary*/"); + + g_async_queue_push(conf->ready_less_locking,GINT_TO_POINTER(1)); + + struct job* job= NULL; + struct table_job* tj= NULL; + struct tables_job* mj=NULL; + struct schema_job* sj= NULL; + struct view_job* vj= NULL; + struct schema_post_job* sp= NULL; + #ifdef WITH_BINLOG + struct binlog_job* bj= NULL; + #endif + GList* glj; + int first = 1; + GString *query= g_string_sized_new(1024); + GString *prev_table = g_string_sized_new(100); + GString *prev_database = g_string_sized_new(100); + + for(;;) { + GTimeVal tv; + g_get_current_time(&tv); + g_time_val_add(&tv,1000*1000*1); + job=(struct job *)g_async_queue_pop(conf->queue_less_locking); + if (shutdown_triggered && (job->type != JOB_SHUTDOWN)) { + continue; + } + + switch (job->type) { + case JOB_LOCK_DUMP_NON_INNODB: + mj=(struct tables_job *)job->job_data; + glj = g_list_copy(mj->table_job_list); + for (glj= g_list_first(glj); glj; glj= g_list_next(glj)) { + tj = (struct table_job *)glj->data; + if(first){ + g_string_printf(query, "LOCK TABLES `%s`.`%s` READ LOCAL",tj->database,tj->table); + first = 0; + }else{ + if(g_ascii_strcasecmp(prev_database->str, tj->database) || g_ascii_strcasecmp(prev_table->str, tj->table)){ + g_string_append_printf(query, ", `%s`.`%s` READ 
LOCAL",tj->database,tj->table); + } + } + g_string_printf(prev_table, "%s", tj->table); + g_string_printf(prev_database, "%s", tj->database); + } + first = 1; + if(mysql_query(thrconn,query->str)){ + g_critical("Non Innodb lock tables fail: %s", mysql_error(thrconn)); + exit(EXIT_FAILURE); + } + if (g_atomic_int_dec_and_test(&non_innodb_table_counter) && g_atomic_int_get(&non_innodb_done)) { + g_async_queue_push(conf->unlock_tables, GINT_TO_POINTER(1)); + } + for (mj->table_job_list= g_list_first(mj->table_job_list); mj->table_job_list; mj->table_job_list= g_list_next(mj->table_job_list)) { + tj = (struct table_job *)mj->table_job_list->data; + if (tj->where) + g_message("Thread %d dumping data for `%s`.`%s` where %s", td->thread_id, tj->database, tj->table, tj->where); + else + g_message("Thread %d dumping data for `%s`.`%s`", td->thread_id, tj->database, tj->table); + dump_table_data_file(thrconn, tj->database, tj->table, tj->where, tj->filename); + if(tj->database) g_free(tj->database); + if(tj->table) g_free(tj->table); + if(tj->where) g_free(tj->where); + if(tj->filename) g_free(tj->filename); + g_free(tj); + } + mysql_query(thrconn, "UNLOCK TABLES /* Non Innodb */"); + g_free(g_list_first(mj->table_job_list)); + g_free(mj); + g_free(job); + break; + case JOB_SCHEMA: + sj=(struct schema_job *)job->job_data; + g_message("Thread %d dumping schema for `%s`.`%s`", td->thread_id, sj->database, sj->table); + dump_schema_data(thrconn, sj->database, sj->table, sj->filename); + if(sj->database) g_free(sj->database); + if(sj->table) g_free(sj->table); + if(sj->filename) g_free(sj->filename); + g_free(sj); + g_free(job); + break; + case JOB_VIEW: + vj=(struct view_job *)job->job_data; + g_message("Thread %d dumping view for `%s`.`%s`", td->thread_id, sj->database, sj->table); + dump_view_data(thrconn, vj->database, vj->table, vj->filename, vj->filename2); + if(vj->database) g_free(vj->database); + if(vj->table) g_free(vj->table); + if(vj->filename) g_free(vj->filename); 
+ if(vj->filename2) g_free(vj->filename2); + g_free(vj); + g_free(job); + break; + case JOB_TRIGGERS: + sj=(struct schema_job *)job->job_data; + g_message("Thread %d dumping triggers for `%s`.`%s`", td->thread_id, sj->database, sj->table); + dump_triggers_data(thrconn, sj->database, sj->table, sj->filename); + if(sj->database) g_free(sj->database); + if(sj->table) g_free(sj->table); + if(sj->filename) g_free(sj->filename); + g_free(sj); + g_free(job); + break; + case JOB_SCHEMA_POST: + sp=(struct schema_post_job *)job->job_data; + g_message("Thread %d dumping SP and VIEWs for `%s`", td->thread_id, sp->database); + dump_schema_post_data(thrconn, sp->database, sp->filename); + if(sp->database) g_free(sp->database); + if(sp->filename) g_free(sp->filename); + g_free(sp); + g_free(job); + break; + #ifdef WITH_BINLOG + case JOB_BINLOG: + thrconn= reconnect_for_binlog(thrconn); + g_message("Thread %d connected using MySQL connection ID %lu (in binlog mode)", td->thread_id, mysql_thread_id(thrconn)); + bj=(struct binlog_job *)job->job_data; + g_message("Thread %d dumping binary log file %s", td->thread_id, bj->filename); + get_binlog_file(thrconn, bj->filename, binlog_directory, bj->start_position, bj->stop_position, FALSE); + if(bj->filename) + g_free(bj->filename); + g_free(bj); + g_free(job); + break; + #endif + case JOB_SHUTDOWN: + g_message("Thread %d shutting down", td->thread_id); + g_mutex_lock(ll_mutex); + less_locking_threads--; + g_cond_broadcast(ll_cond); + g_mutex_unlock(ll_mutex); + if (thrconn) + mysql_close(thrconn); + g_free(job); + mysql_thread_end(); + return NULL; + break; + default: + g_critical("Something very bad happened!"); + exit(EXIT_FAILURE); + } + } + if (thrconn) + mysql_close(thrconn); + mysql_thread_end(); + return NULL; +} +#ifdef WITH_BINLOG +MYSQL *reconnect_for_binlog(MYSQL *thrconn) { + if (thrconn) { + mysql_close(thrconn); + } + g_mutex_lock(init_mutex); + thrconn= mysql_init(NULL); + g_mutex_unlock(init_mutex); + + if 
(compress_protocol) + mysql_options(thrconn,MYSQL_OPT_COMPRESS,NULL); + + int timeout= 1; + mysql_options(thrconn, MYSQL_OPT_READ_TIMEOUT, (const char*)&timeout); + + if (!mysql_real_connect(thrconn, hostname, username, password, NULL, port, socket_path, 0)) { + g_critical("Failed to re-connect to database: %s", mysql_error(thrconn)); + exit(EXIT_FAILURE); + } + return thrconn; +} +#endif +int main(int argc, char *argv[]) +{ + GError *error = NULL; + GOptionContext *context; + + g_thread_init(NULL); + + init_mutex = g_mutex_new(); + ll_mutex = g_mutex_new(); + ll_cond = g_cond_new(); + + context = g_option_context_new("multi-threaded MySQL dumping"); + GOptionGroup *main_group= g_option_group_new("main", "Main Options", "Main Options", NULL, NULL); + g_option_group_add_entries(main_group, entries); + g_option_group_add_entries(main_group, common_entries); + g_option_context_set_main_group(context, main_group); + if (!g_option_context_parse(context, &argc, &argv, &error)) { + g_print ("option parsing failed: %s, try --help\n", error->message); + exit (EXIT_FAILURE); + } + g_option_context_free(context); + + if (program_version) { + g_print("mydumper %s, built against MySQL %s\n", VERSION, MYSQL_SERVER_VERSION); + exit (EXIT_SUCCESS); + } + + set_verbose(verbose); + + time_t t; + time(&t);localtime_r(&t,&tval); + + //rows chunks have precedence over chunk_filesize + if (rows_per_file > 0 && chunk_filesize > 0){ + chunk_filesize = 0; + g_warning("--chunk-filesize disabled by --rows option"); + } + + //until we have an unique option on lock types we need to ensure this + if(no_locks || trx_consistency_only) + less_locking = 0; + + /* savepoints workaround to avoid metadata locking issues + doesnt work for chuncks */ + if(rows_per_file && use_savepoints){ + use_savepoints = FALSE; + g_warning("--use-savepoints disabled by --rows"); + } + + //clarify binlog coordinates with trx_consistency_only + if(trx_consistency_only) + g_warning("Using trx_consistency_only, binlog 
coordinates will not be accurate if you are writing to non transactional tables."); + + if (!output_directory) + output_directory = g_strdup_printf("%s-%04d%02d%02d-%02d%02d%02d",DIRECTORY, + tval.tm_year+1900, tval.tm_mon+1, tval.tm_mday, + tval.tm_hour, tval.tm_min, tval.tm_sec); + + create_backup_dir(output_directory); + if (daemon_mode) { + pid_t pid, sid; + + pid= fork(); + if (pid < 0) + exit(EXIT_FAILURE); + else if (pid > 0) + exit(EXIT_SUCCESS); + + umask(0); + sid= setsid(); + + if (sid < 0) + exit(EXIT_FAILURE); + + char *dump_directory= g_strdup_printf("%s/0", output_directory); + create_backup_dir(dump_directory); + g_free(dump_directory); + dump_directory= g_strdup_printf("%s/1", output_directory); + create_backup_dir(dump_directory); + g_free(dump_directory); + #ifdef WITH_BINLOG + daemon_binlog_directory= g_strdup_printf("%s/%s", output_directory, DAEMON_BINLOGS); + create_backup_dir(daemon_binlog_directory); + #endif + } + #ifdef WITH_BINLOG + if (need_binlogs) { + binlog_directory = g_strdup_printf("%s/%s", output_directory, BINLOG_DIRECTORY); + create_backup_dir(binlog_directory); + } + #endif + /* Give ourselves an array of engines to ignore */ + if (ignore_engines) + ignore = g_strsplit(ignore_engines, ",", 0); + + /* Give ourselves an array of tables to dump */ + if (tables_list) + tables = g_strsplit(tables_list, ",", 0); + + if (daemon_mode) { + GError* terror; + #ifdef WITH_BINLOG + GThread *bthread= g_thread_create(binlog_thread, GINT_TO_POINTER(1), FALSE, &terror); + if (bthread == NULL) { + g_critical("Could not create binlog thread: %s", terror->message); + g_error_free(terror); + exit(EXIT_FAILURE); + } + #endif + start_scheduled_dump= g_async_queue_new(); + GThread *ethread= g_thread_create(exec_thread, GINT_TO_POINTER(1), FALSE, &terror); + if (ethread == NULL) { + g_critical("Could not create exec thread: %s", terror->message); + g_error_free(terror); + exit(EXIT_FAILURE); + } + // Run initial snapshot + run_snapshot(NULL); + #if 
GLIB_MINOR_VERSION < 14 + g_timeout_add(snapshot_interval*60*1000, (GSourceFunc) run_snapshot, NULL); + #else + g_timeout_add_seconds(snapshot_interval*60, (GSourceFunc) run_snapshot, NULL); + #endif + guint sigsource= g_unix_signal_add(SIGINT, sig_triggered, NULL); + sigsource= g_unix_signal_add(SIGTERM, sig_triggered, NULL); + m1= g_main_loop_new(NULL, TRUE); + g_main_loop_run(m1); + g_source_remove(sigsource); + } else { + MYSQL *conn= create_main_connection(); + start_dump(conn); + } + + //sleep(5); + mysql_thread_end(); + mysql_library_end(); + g_free(output_directory); + g_strfreev(ignore); + g_strfreev(tables); + + if (logoutfile) { + fclose(logoutfile); + } + + exit(errors ? EXIT_FAILURE : EXIT_SUCCESS); +} + +MYSQL *create_main_connection() +{ + MYSQL *conn; + conn = mysql_init(NULL); + mysql_options(conn,MYSQL_READ_DEFAULT_GROUP,"mydumper"); + + if (!mysql_real_connect(conn, hostname, username, password, db, port, socket_path, 0)) { + g_critical("Error connecting to database: %s", mysql_error(conn)); + exit(EXIT_FAILURE); + } + + detected_server= detect_server(conn); + + if ((detected_server == SERVER_TYPE_MYSQL) && mysql_query(conn, "SET SESSION wait_timeout = 2147483")){ + g_warning("Failed to increase wait_timeout: %s", mysql_error(conn)); + } + if ((detected_server == SERVER_TYPE_MYSQL) && mysql_query(conn, "SET SESSION net_write_timeout = 2147483")){ + g_warning("Failed to increase net_write_timeout: %s", mysql_error(conn)); + } + + switch (detected_server) { + case SERVER_TYPE_MYSQL: + g_message("Connected to a MySQL server"); + break; + case SERVER_TYPE_DRIZZLE: + g_message("Connected to a Drizzle server"); + break; + default: + g_critical("Cannot detect server type"); + exit(EXIT_FAILURE); + break; + } + + return conn; +} + +void *exec_thread(void *data) { + (void) data; + + while(1) { + g_async_queue_pop(start_scheduled_dump); + clear_dump_directory(); + MYSQL *conn= create_main_connection(); + start_dump(conn); + mysql_close(conn); + 
mysql_thread_end(); + + // Don't switch the symlink on shutdown because the dump is probably incomplete. + if (!shutdown_triggered) { + const char *dump_symlink_source= (dump_number == 0) ? "0" : "1"; + char *dump_symlink_dest= g_strdup_printf("%s/last_dump", output_directory); + + // We don't care if this fails + g_unlink(dump_symlink_dest); + + if (symlink(dump_symlink_source, dump_symlink_dest) == -1) { + g_critical("error setting last good dump symlink %s, %d", dump_symlink_dest, errno); + } + g_free(dump_symlink_dest); + + dump_number= (dump_number == 1) ? 0 : 1; + } + } + return NULL; +} +#ifdef WITH_BINLOG +void *binlog_thread(void *data) { + (void) data; + MYSQL_RES *master= NULL; + MYSQL_ROW row; + MYSQL *conn; + conn = mysql_init(NULL); + mysql_options(conn,MYSQL_READ_DEFAULT_GROUP,"mydumper"); + + if (!mysql_real_connect(conn, hostname, username, password, db, port, socket_path, 0)) { + g_critical("Error connecting to database: %s", mysql_error(conn)); + exit(EXIT_FAILURE); + } + + mysql_query(conn,"SHOW MASTER STATUS"); + master= mysql_store_result(conn); + if (master && (row= mysql_fetch_row(master))) { + MYSQL *binlog_connection= NULL; + binlog_connection= reconnect_for_binlog(binlog_connection); + binlog_connect_id= mysql_thread_id(binlog_connection); + guint64 start_position= g_ascii_strtoull(row[1], NULL, 10); + gchar* filename= g_strdup(row[0]); + mysql_free_result(master); + mysql_close(conn); + g_message("Continuous binlog thread connected using MySQL connection ID %lu", mysql_thread_id(binlog_connection)); + get_binlog_file(binlog_connection, filename, daemon_binlog_directory, start_position, 0, TRUE); + g_free(filename); + mysql_close(binlog_connection); + } else { + mysql_free_result(master); + mysql_close(conn); + } + g_message("Continuous binlog thread shutdown"); + mysql_thread_end(); + return NULL; +} +#endif +void start_dump(MYSQL *conn) +{ + struct configuration conf = { 1, NULL, NULL, NULL, NULL, NULL, NULL, 0 }; + char *p; + char *p2; 
+ char *p3; + char *u; + + guint64 nits[num_threads]; + GList* nitl[num_threads]; + int tn = 0; + guint64 min = 0; + time_t t; + struct db_table *dbt; + struct schema_post *sp; + guint n; + FILE* nufile = NULL; + + for(n=0;n 0){ + if (daemon_mode) + u= g_strdup_printf("%s/%d/not_updated_tables", output_directory, dump_number); + else + u= g_strdup_printf("%s/not_updated_tables", output_directory); + nufile=g_fopen(u,"w"); + if(!nufile) { + g_critical("Couldn't write not_updated_tables file (%d)",errno); + exit(EXIT_FAILURE); + } + get_not_updated(conn); + } + + /* We check SHOW PROCESSLIST, and if there're queries + larger than preset value, we terminate the process. + + This avoids stalling whole server with flush */ + + if (mysql_query(conn, "SHOW PROCESSLIST")) { + g_warning("Could not check PROCESSLIST, no long query guard enabled: %s", mysql_error(conn)); + } else { + MYSQL_RES *res = mysql_store_result(conn); + MYSQL_ROW row; + + /* Just in case PROCESSLIST output column order changes */ + MYSQL_FIELD *fields = mysql_fetch_fields(res); + guint i; + int tcol=-1, ccol=-1, icol=-1; + for(i=0; ilongquery) { + if (killqueries) { + if (mysql_query(conn,p3=g_strdup_printf("KILL %lu",atol(row[icol])))) + g_warning("Could not KILL slow query: %s",mysql_error(conn)); + else + g_warning("Killed a query that was running for %ss",row[tcol]); + g_free(p3); + } else { + g_critical("There are queries in PROCESSLIST running longer than %us, aborting dump,\n\t" + "use --long-query-guard to change the guard value, kill queries (--kill-long-queries) or use \n\tdifferent server for dump", longquery); + exit(EXIT_FAILURE); + } + } + } + mysql_free_result(res); + } + + if (!no_locks) { + if(lock_all_tables){ + // LOCK ALL TABLES + GString *query= g_string_sized_new(16777216); + gchar *dbtb = NULL; + gchar **dt= NULL; + GList *tables_lock = NULL; + GList *iter = NULL; + guint success = 0; + guint retry = 0; + guint lock = 1; + int i = 0; + + if(db){ + g_string_printf(query, "SELECT 
TABLE_SCHEMA, TABLE_NAME FROM information_schema.TABLES WHERE TABLE_SCHEMA = '%s' AND TABLE_TYPE ='BASE TABLE' AND NOT (TABLE_SCHEMA = 'mysql' AND (TABLE_NAME = 'slow_log' OR TABLE_NAME = 'general_log'))", db); + } else if (tables) { + for (i = 0; tables[i] != NULL; i++){ + dt = g_strsplit(tables[i], ".", 0); + dbtb = g_strdup_printf("`%s`.`%s`",dt[0],dt[1]); + tables_lock = g_list_append(tables_lock,dbtb); + } + }else{ + g_string_printf(query, "SELECT TABLE_SCHEMA, TABLE_NAME FROM information_schema.TABLES WHERE TABLE_TYPE ='BASE TABLE' AND TABLE_SCHEMA NOT IN ('information_schema', 'performance_schema', 'data_dictionary') AND NOT (TABLE_SCHEMA = 'mysql' AND (TABLE_NAME = 'slow_log' OR TABLE_NAME = 'general_log'))"); + } + + if (tables_lock == NULL) { + if(mysql_query(conn, query->str)){ + g_critical("Couldn't get table list for lock all tables: %s",mysql_error(conn)); + errors++; + }else{ + MYSQL_RES *res = mysql_store_result(conn); + MYSQL_ROW row; + + while ((row=mysql_fetch_row(res))) { + lock = 1; + if (tables) { + int table_found=0; + for (i = 0; tables[i] != NULL; i++) + if (g_ascii_strcasecmp(tables[i], row[1]) == 0) + table_found = 1; + if (!table_found) + lock = 0; + } + if (lock && regexstring && !check_regex(row[0],row[1])) + continue; + + if(lock) { + dbtb = g_strdup_printf("`%s`.`%s`",row[0],row[1]); + tables_lock = g_list_append(tables_lock,dbtb); + } + } + } + } + + // Try three times to get the lock, this is in case of tmp tables disappearing + while(!success && retry < 4){ + n = 0; + iter = tables_lock; + for (iter= g_list_first(iter); iter; iter= g_list_next(iter)) { + if(n == 0){ + g_string_printf(query, "LOCK TABLE %s READ", (char *) iter->data); + n = 1; + }else{ + g_string_append_printf(query, ", %s READ",(char *) iter->data); + } + } + if(mysql_query(conn,query->str)){ + gchar *failed_table = NULL; + gchar **tmp_fail; + + tmp_fail = g_strsplit(mysql_error(conn), "'",0); + tmp_fail = g_strsplit(tmp_fail[1], ".", 0); + failed_table = 
g_strdup_printf("`%s`.`%s`", tmp_fail[0], tmp_fail[1]); + iter = tables_lock; + for (iter= g_list_first(iter); iter; iter= g_list_next(iter)) { + if(strcmp (iter->data, failed_table) == 0){ + tables_lock = g_list_remove(tables_lock, iter->data); + } + } + g_free(tmp_fail); + g_free(failed_table); + }else{ + success = 1; + } + retry += 1; + } + if(!success){ + g_critical("Lock all tables fail: %s", mysql_error(conn)); + exit(EXIT_FAILURE); + } + g_free(query->str); + g_list_free(tables_lock); + }else{ + if(mysql_query(conn, "FLUSH TABLES WITH READ LOCK")) { + g_critical("Couldn't acquire global lock, snapshots will not be consistent: %s",mysql_error(conn)); + errors++; + } + } + } else { + g_warning("Executing in no-locks mode, snapshot will notbe consistent"); + } + if (mysql_get_server_version(conn) < 40108) { + mysql_query(conn, "CREATE TABLE IF NOT EXISTS mysql.mydumperdummy (a INT) ENGINE=INNODB"); + need_dummy_read=1; + } + + //tokudb do not support consistent snapshot + mysql_query(conn,"SELECT @@tokudb_version"); + MYSQL_RES *rest = mysql_store_result(conn); + if(rest != NULL && mysql_num_rows(rest)){ + mysql_free_result(rest); + g_message("TokuDB detected, creating dummy table for CS"); + mysql_query(conn, "CREATE TABLE IF NOT EXISTS mysql.tokudbdummy (a INT) ENGINE=TokuDB"); + need_dummy_toku_read=1; + } + + mysql_query(conn, "START TRANSACTION /*!40108 WITH CONSISTENT SNAPSHOT */"); + if (need_dummy_read) { + mysql_query(conn,"SELECT /*!40001 SQL_NO_CACHE */ * FROM mysql.mydumperdummy"); + MYSQL_RES *res=mysql_store_result(conn); + if (res) + mysql_free_result(res); + } + if(need_dummy_toku_read){ + mysql_query(conn,"SELECT /*!40001 SQL_NO_CACHE */ * FROM mysql.tokudbdummy"); + MYSQL_RES *res=mysql_store_result(conn); + if (res) + mysql_free_result(res); + } + time(&t); localtime_r(&t,&tval); + fprintf(mdfile,"Started dump at: %04d-%02d-%02d %02d:%02d:%02d\n", + tval.tm_year+1900, tval.tm_mon+1, tval.tm_mday, + tval.tm_hour, tval.tm_min, tval.tm_sec); + + 
g_message("Started dump at: %04d-%02d-%02d %02d:%02d:%02d\n", + tval.tm_year+1900, tval.tm_mon+1, tval.tm_mday, + tval.tm_hour, tval.tm_min, tval.tm_sec); + + if (detected_server == SERVER_TYPE_MYSQL) { + mysql_query(conn, "/*!40101 SET NAMES binary*/"); + + write_snapshot_info(conn, mdfile); + } + + GThread **threads = g_new(GThread*,num_threads*(less_locking+1)); + struct thread_data *td= g_new(struct thread_data, num_threads*(less_locking+1)); + + if(less_locking){ + conf.queue_less_locking = g_async_queue_new(); + conf.ready_less_locking = g_async_queue_new(); + less_locking_threads = num_threads; + for (n=num_threads; ndata; + tn = 0; + min = nits[0]; + for (n=1; ndatalength; + } + + for (n=0; n 0){ + g_atomic_int_inc(&non_innodb_table_counter); + dump_tables(conn, nitl[n], &conf); + } + } + g_list_free(g_list_first(non_innodb_table)); + + if(g_atomic_int_get(&non_innodb_table_counter)) + g_atomic_int_inc(&non_innodb_done); + else + g_async_queue_push(conf.unlock_tables, GINT_TO_POINTER(1)); + + for (n=0; ntype = JOB_SHUTDOWN; + g_async_queue_push(conf.queue_less_locking,j); + } + }else{ + for (non_innodb_table= g_list_first(non_innodb_table); non_innodb_table; non_innodb_table= g_list_next(non_innodb_table)) { + dbt= (struct db_table*) non_innodb_table->data; + dump_table(conn, dbt->database, dbt->table, &conf, FALSE); + g_atomic_int_inc(&non_innodb_table_counter); + } + g_list_free(g_list_first(non_innodb_table)); + g_atomic_int_inc(&non_innodb_done); + } + + for (innodb_tables= g_list_first(innodb_tables); innodb_tables; innodb_tables= g_list_next(innodb_tables)) { + dbt= (struct db_table*) innodb_tables->data; + dump_table(conn, dbt->database, dbt->table, &conf, TRUE); + } + g_list_free(g_list_first(innodb_tables)); + + for (table_schemas= g_list_first(table_schemas); table_schemas; table_schemas= g_list_next(table_schemas)) { + dbt= (struct db_table*) table_schemas->data; + dump_schema(conn, dbt->database, dbt->table, &conf); + g_free(dbt->table); + 
g_free(dbt->database); + g_free(dbt); + } + g_list_free(g_list_first(table_schemas)); + + for (view_schemas= g_list_first(view_schemas); view_schemas; view_schemas= g_list_next(view_schemas)) { + dbt= (struct db_table*) view_schemas->data; + dump_view(dbt->database, dbt->table, &conf); + g_free(dbt->table); + g_free(dbt->database); + g_free(dbt); + } + g_list_free(g_list_first(view_schemas)); + + for (schema_post= g_list_first(schema_post); schema_post; schema_post= g_list_next(schema_post)) { + sp= (struct schema_post*) schema_post->data; + dump_schema_post(sp->database, &conf); + g_free(sp->database); + g_free(sp); + } + g_list_free(g_list_first(schema_post)); + + if (!no_locks && !trx_consistency_only) { + g_async_queue_pop(conf.unlock_tables); + g_message("Non-InnoDB dump complete, unlocking tables"); + mysql_query(conn, "UNLOCK TABLES /* FTWRL */"); + } + #ifdef WITH_BINLOG + if (need_binlogs) { + get_binlogs(conn, &conf); + } + #endif + // close main connection + mysql_close(conn); + + if(less_locking){ + for (n=num_threads; ntype = JOB_SHUTDOWN; + g_async_queue_push(conf.queue,j); + } + + for (n=0; n 0) + fclose(nufile); + g_rename(p, p2); + g_free(p); + g_free(p2); + g_message("Finished dump at: %04d-%02d-%02d %02d:%02d:%02d\n", + tval.tm_year+1900, tval.tm_mon+1, tval.tm_mday, + tval.tm_hour, tval.tm_min, tval.tm_sec); + + g_free(td); + g_free(threads); +} + +void dump_create_database(MYSQL *conn, char *database){ + void* outfile = NULL; + char* filename; + char *query = NULL; + MYSQL_RES *result = NULL; + MYSQL_ROW row; + + if (daemon_mode) + filename = g_strdup_printf("%s/%d/%s-schema-create.sql%s", output_directory, dump_number, database, (compress_output?".gz":"")); + else + filename = g_strdup_printf("%s/%s-schema-create.sql%s", output_directory, database, (compress_output?".gz":"")); + + if (!compress_output) + outfile= g_fopen(filename, "w"); + else + outfile= (void*) gzopen(filename, "w"); + + if (!outfile) { + g_critical("Error: DB: %s Could not 
create output file %s (%d)", database, filename, errno); + errors++; + return; + } + + GString* statement = g_string_sized_new(statement_size); + + query= g_strdup_printf("SHOW CREATE DATABASE `%s`", database); + if (mysql_query(conn, query) || !(result= mysql_use_result(conn))) { + if(success_on_1146 && mysql_errno(conn) == 1146){ + g_warning("Error dumping create database (%s): %s", database, mysql_error(conn)); + }else{ + g_critical("Error dumping create database (%s): %s", database, mysql_error(conn)); + errors++; + } + g_free(query); + return; + } + + /* There should never be more than one row */ + row = mysql_fetch_row(result); + g_string_append(statement, row[1]); + g_string_append(statement, ";\n"); + if (!write_data((FILE *)outfile, statement)) { + g_critical("Could not write create database for %s", database); + errors++; + } + g_free(query); + + if (!compress_output) + fclose((FILE *)outfile); + else + gzclose((gzFile)outfile); + + + g_string_free(statement, TRUE); + if (result) + mysql_free_result(result); + + g_free(filename); + return; +} + +void get_not_updated(MYSQL *conn){ + MYSQL_RES *res=NULL; + MYSQL_ROW row; + + gchar *query = g_strdup_printf("SELECT CONCAT(TABLE_SCHEMA,'.',TABLE_NAME) FROM information_schema.TABLES WHERE UPDATE_TIME < NOW() - INTERVAL %d DAY",updated_since); + mysql_query(conn,query); + g_free(query); + + res = mysql_store_result(conn); + while((row = mysql_fetch_row(res))) + no_updated_tables = g_list_append(no_updated_tables, row[0]); +} + +/* Heuristic chunks building - based on estimates, produces list of ranges for datadumping + WORK IN PROGRESS +*/ +GList * get_chunks_for_table(MYSQL *conn, char *database, char *table, struct configuration *conf) { + + GList *chunks = NULL; + MYSQL_RES *indexes=NULL, *minmax=NULL, *total=NULL; + MYSQL_ROW row; + char *field = NULL; + int showed_nulls=0; + + /* first have to pick index, in future should be able to preset in configuration too */ + gchar *query = g_strdup_printf("SHOW INDEX 
FROM `%s`.`%s`",database,table); + mysql_query(conn,query); + g_free(query); + indexes=mysql_store_result(conn); + + while ((row=mysql_fetch_row(indexes))) { + if (!strcmp(row[2],"PRIMARY") && (!strcmp(row[3],"1"))) { + /* Pick first column in PK, cardinality doesn't matter */ + field=row[4]; + break; + } + } + + /* If no PK found, try using first UNIQUE index */ + if (!field) { + mysql_data_seek(indexes,0); + while ((row=mysql_fetch_row(indexes))) { + if(!strcmp(row[1],"0") && (!strcmp(row[3],"1"))) { + /* Again, first column of any unique index */ + field=row[4]; + break; + } + } + } + + /* Still unlucky? Pick any high-cardinality index */ + if (!field && conf->use_any_index) { + guint64 max_cardinality=0; + guint64 cardinality=0; + + mysql_data_seek(indexes,0); + while ((row=mysql_fetch_row(indexes))) { + if(!strcmp(row[3],"1")) { + if (row[6]) + cardinality = strtoll(row[6],NULL,10); + if (cardinality>max_cardinality) { + field=row[4]; + max_cardinality=cardinality; + } + } + } + } + /* Oh well, no chunks today - no suitable index */ + if (!field) goto cleanup; + + /* Get minimum/maximum */ + mysql_query(conn, query=g_strdup_printf("SELECT %s MIN(`%s`),MAX(`%s`) FROM `%s`.`%s`", (detected_server == SERVER_TYPE_MYSQL) ? "/*!40001 SQL_NO_CACHE */" : "", field, field, database, table)); + g_free(query); + minmax=mysql_store_result(conn); + + if (!minmax) + goto cleanup; + + row=mysql_fetch_row(minmax); + MYSQL_FIELD * fields=mysql_fetch_fields(minmax); + char *min=row[0]; + char *max=row[1]; + + /* Got total number of rows, skip chunk logic if estimates are low */ + guint64 rows = estimate_count(conn, database, table, field, NULL, NULL); + if (rows <= rows_per_file) + goto cleanup; + + /* This is estimate, not to use as guarantee! 
Every chunk would have eventual adjustments */ + guint64 estimated_chunks = rows / rows_per_file; + guint64 estimated_step, nmin, nmax, cutoff; + + /* Support just bigger INTs for now, very dumb, no verify approach */ + switch (fields[0].type) { + case MYSQL_TYPE_LONG: + case MYSQL_TYPE_LONGLONG: + case MYSQL_TYPE_INT24: + /* static stepping */ + nmin = strtoll(min,NULL,10); + nmax = strtoll(max,NULL,10); + estimated_step = (nmax-nmin)/estimated_chunks+1; + cutoff = nmin; + while(cutoff<=nmax) { + chunks=g_list_append(chunks,g_strdup_printf("%s%s%s%s(`%s` >= %llu AND `%s` < %llu)", + !showed_nulls?"`":"", + !showed_nulls?field:"", + !showed_nulls?"`":"", + !showed_nulls?" IS NULL OR ":"", + field, (unsigned long long)cutoff, + field, (unsigned long long)(cutoff+estimated_step))); + cutoff+=estimated_step; + showed_nulls=1; + } + + default: + goto cleanup; + } + + +cleanup: + if (indexes) + mysql_free_result(indexes); + if (minmax) + mysql_free_result(minmax); + if (total) + mysql_free_result(total); + return chunks; +} + +/* Try to get EXPLAIN'ed estimates of row in resultset */ +guint64 estimate_count(MYSQL *conn, char *database, char *table, char *field, char *from, char *to) { + char *querybase, *query; + int ret; + + g_assert(conn && database && table); + + querybase = g_strdup_printf("EXPLAIN SELECT `%s` FROM `%s`.`%s`", (field?field:"*"), database, table); + if (from || to) { + g_assert(field != NULL); + char *fromclause=NULL, *toclause=NULL; + char *escaped; + if (from) { + escaped=g_new(char,strlen(from)*2+1); + mysql_real_escape_string(conn,escaped,from,strlen(from)); + fromclause = g_strdup_printf(" `%s` >= \"%s\" ", field, escaped); + g_free(escaped); + } + if (to) { + escaped=g_new(char,strlen(to)*2+1); + mysql_real_escape_string(conn,escaped,from,strlen(from)); + toclause = g_strdup_printf( " `%s` <= \"%s\"", field, escaped); + g_free(escaped); + } + query = g_strdup_printf("%s WHERE `%s` %s %s", querybase, (from?fromclause:""), ((from&&to)?"AND":""), 
(to?toclause:"")); + + if (toclause) g_free(toclause); + if (fromclause) g_free(fromclause); + ret=mysql_query(conn,query); + g_free(querybase); + g_free(query); + } else { + ret=mysql_query(conn,querybase); + g_free(querybase); + } + + if (ret) { + g_warning("Unable to get estimates for %s.%s: %s",database,table,mysql_error(conn)); + } + + MYSQL_RES * result = mysql_store_result(conn); + MYSQL_FIELD * fields = mysql_fetch_fields(result); + + guint i; + for (i=0; i1 kicks in only in case of 5.0 SHOW FULL TABLES or SHOW TABLE STATUS + row[1] == NULL if it is a view in 5.0 'SHOW TABLE STATUS' + row[1] == "VIEW" if it is a view in 5.0 'SHOW FULL TABLES' + */ + if ((detected_server == SERVER_TYPE_MYSQL) && ( row[ccol] == NULL || !strcmp(row[ccol],"VIEW") )) + is_view = 1; + + /* Check for broken tables, i.e. mrg with missing source tbl */ + if ( !is_view && row[ecol] == NULL ) { + g_warning("Broken table detected, please review: %s.%s", database, row[0]); + dump = 0; + } + + /* Skip ignored engines, handy for avoiding Merge, Federated or Blackhole :-) dumps */ + if (dump && ignore && !is_view) { + for (i = 0; ignore[i] != NULL; i++) { + if (g_ascii_strcasecmp(ignore[i], row[ecol]) == 0) { + dump = 0; + break; + } + } + } + if (!dump) + continue; + + /* In case of table-list option is enabled, check if table is part of the list */ + if (tables) { + int table_found=0; + for (i = 0; tables[i] != NULL; i++) + if (g_ascii_strcasecmp(tables[i], row[0]) == 0) + table_found = 1; + + if (!table_found) + dump = 0; + } + if (!dump) + continue; + + /* Special tables */ + if(g_ascii_strcasecmp(database, "mysql") == 0 && (g_ascii_strcasecmp(row[0], "general_log") == 0 || + g_ascii_strcasecmp(row[0], "slow_log") == 0 || + g_ascii_strcasecmp(row[0], "innodb_index_stats") == 0 || + g_ascii_strcasecmp(row[0], "innodb_table_stats") == 0)){ + dump=0; + continue; + } + + /* Checks PCRE expressions on 'database.table' string */ + if (regexstring && !check_regex(database,row[0])) + continue; 
+ + /* Check if the table was recently updated */ + if(no_updated_tables && !is_view){ + iter = no_updated_tables; + for (iter= g_list_first(iter); iter; iter= g_list_next(iter)) { + if(g_ascii_strcasecmp (iter->data, g_strdup_printf("%s.%s", database, row[0])) == 0){ + g_message("NO UPDATED TABLE: %s.%s", database, row[0]); + fprintf(file, "%s.%s\n", database, row[0]); + dump=0; + } + } + } + if (!dump) + continue; + + /* Green light! */ + struct db_table *dbt = g_new(struct db_table, 1); + dbt->database= g_strdup(database); + dbt->table= g_strdup(row[0]); + if(!row[6]) + dbt->datalength = 0; + else + dbt->datalength = g_ascii_strtoull(row[6], NULL, 10); + //if is a view we care only about schema + if(!is_view){ + // with trx_consistency_only we dump all as innodb_tables + // and we can start right now + if(!no_data){ + if(row[ecol] != NULL && g_ascii_strcasecmp("MRG_MYISAM", row[ecol])){ + if (trx_consistency_only) { + dump_table(conn, dbt->database, dbt->table, conf, TRUE); + }else if (row[ecol] != NULL && !g_ascii_strcasecmp("InnoDB", row[ecol])) { + innodb_tables= g_list_append(innodb_tables, dbt); + }else if(row[ecol] != NULL && !g_ascii_strcasecmp("TokuDB", row[ecol])){ + innodb_tables= g_list_append(innodb_tables, dbt); + } else { + non_innodb_table= g_list_append(non_innodb_table, dbt); + } + } + } + if (!no_schemas){ + table_schemas= g_list_append(table_schemas, dbt); + } + }else{ + if (!no_schemas){ + view_schemas= g_list_append(view_schemas, dbt); + } + } + } + + //Store Procedures and Events + //As these are not attached to tables we need to define when we need to dump or not + //Having regex filter make this hard because we dont now if a full schema is filtered or not + //Also I cant decide this based on tables from a schema being dumped + //So I will use only regex to dump or not SP and EVENTS + //I only need one match to dump all + + int post_dump = 0; + + if(dump_routines){ + //SP + query = g_strdup_printf("SHOW PROCEDURE STATUS WHERE Db = '%s'", 
database); + if (mysql_query(conn, (query))) { + g_critical("Error: DB: %s - Could not execute query: %s", database, mysql_error(conn)); + errors++; + return; + } + result = mysql_store_result(conn); + while ((row = mysql_fetch_row(result)) && !post_dump){ + /* Checks PCRE expressions on 'database.sp' string */ + if (regexstring && !check_regex(database,row[1])) + continue; + + post_dump = 1; + } + + if(!post_dump){ + //FUNCTIONS + query = g_strdup_printf("SHOW FUNCTION STATUS WHERE Db = '%s'", database); + if (mysql_query(conn, (query))) { + g_critical("Error: DB: %s - Could not execute query: %s", database, mysql_error(conn)); + errors++; + return; + } + result = mysql_store_result(conn); + while ((row = mysql_fetch_row(result)) && !post_dump){ + /* Checks PCRE expressions on 'database.sp' string */ + if (regexstring && !check_regex(database,row[1])) + continue; + + post_dump = 1; + } + } + } + + if(dump_events && !post_dump){ + //EVENTS + query = g_strdup_printf("SHOW EVENTS FROM `%s`", database); + if (mysql_query(conn, (query))) { + g_critical("Error: DB: %s - Could not execute query: %s", database, mysql_error(conn)); + errors++; + return; + } + result = mysql_store_result(conn); + while ((row = mysql_fetch_row(result)) && !post_dump){ + /* Checks PCRE expressions on 'database.sp' string */ + if (regexstring && !check_regex(database,row[1])) + continue; + + post_dump = 1; + } + } + + if(post_dump){ + struct schema_post *sp = g_new(struct schema_post, 1); + sp->database= g_strdup(database); + schema_post= g_list_append(schema_post, sp); + } + + g_free(query); + mysql_free_result(result); + if(file) + fflush(file); + + return; +} + +void get_tables(MYSQL * conn, struct configuration *conf) { + + gchar **dt= NULL; + char *query=NULL; + guint i,x; + + for (x = 0; tables[x] != NULL; x++){ + dt = g_strsplit(tables[x], ".", 0); + query= g_strdup_printf("SHOW TABLE STATUS FROM %s LIKE '%s'", dt[0], dt[1]); + + if (mysql_query(conn, (query))) { + g_critical("Error: 
DB: %s - Could not execute query: %s", dt[0], mysql_error(conn)); + errors++; + return; + } + + MYSQL_RES *result = mysql_store_result(conn); + MYSQL_FIELD *fields= mysql_fetch_fields(result); + guint ecol= -1; + guint ccol= -1; + for (i=0; idatabase= g_strdup(dt[0]); + dbt->table= g_strdup(dt[1]); + if(!row[6]) + dbt->datalength = 0; + else + dbt->datalength = g_ascii_strtoull(row[6], NULL, 10); + if(!is_view){ + if (trx_consistency_only) { + dump_table(conn, dbt->database, dbt->table, conf, TRUE); + }else if (!g_ascii_strcasecmp("InnoDB", row[ecol])) { + innodb_tables= g_list_append(innodb_tables, dbt); + }else if(!g_ascii_strcasecmp("TokuDB", row[ecol])){ + innodb_tables= g_list_append(innodb_tables, dbt); + } else { + non_innodb_table= g_list_append(non_innodb_table, dbt); + } + if (!no_schemas) { + table_schemas= g_list_append(table_schemas, dbt); + } + }else{ + if (!no_schemas){ + view_schemas= g_list_append(view_schemas, dbt); + } + } + } + } + g_free(query); +} + +void set_charset(GString* statement, char *character_set, char *collation_connection){ + g_string_printf(statement,"SET @PREV_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT;\n"); + g_string_append(statement,"SET @PREV_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS;\n"); + g_string_append(statement,"SET @PREV_COLLATION_CONNECTION=@@COLLATION_CONNECTION;\n"); + + g_string_append_printf(statement, "SET character_set_client = %s;\n", character_set); + g_string_append_printf(statement, "SET character_set_results = %s;\n", character_set); + g_string_append_printf(statement, "SET collation_connection = %s;\n", collation_connection); +} + +void restore_charset(GString* statement){ + g_string_append(statement,"SET character_set_client = @PREV_CHARACTER_SET_CLIENT;\n"); + g_string_append(statement,"SET character_set_results = @PREV_CHARACTER_SET_RESULTS;\n"); + g_string_append(statement,"SET collation_connection = @PREV_COLLATION_CONNECTION;\n"); +} + +void dump_schema_post_data(MYSQL *conn, char *database, 
char *filename){ + void *outfile; + char *query = NULL; + MYSQL_RES *result = NULL; + MYSQL_RES *result2 = NULL; + MYSQL_ROW row; + MYSQL_ROW row2; + gchar **splited_st= NULL; + + if (!compress_output) + outfile= g_fopen(filename, "w"); + else + outfile= (void*) gzopen(filename, "w"); + + if (!outfile) { + g_critical("Error: DB: %s Could not create output file %s (%d)", database, filename, errno); + errors++; + return; + } + + GString* statement = g_string_sized_new(statement_size); + + if(dump_routines){ + // get functions + query= g_strdup_printf("SHOW FUNCTION STATUS WHERE Db = '%s'", database); + if (mysql_query(conn, query) || !(result= mysql_store_result(conn))) { + if(success_on_1146 && mysql_errno(conn) == 1146){ + g_warning("Error dumping functions from %s: %s", database, mysql_error(conn)); + }else{ + g_critical("Error dumping functions from %s: %s", database, mysql_error(conn)); + errors++; + } + g_free(query); + return; + } + + while((row = mysql_fetch_row(result))){ + set_charset(statement, row[8], row[9]); + g_string_append_printf(statement,"DROP FUNCTION IF EXISTS `%s`;\n",row[1]); + if (!write_data((FILE *)outfile,statement)) { + g_critical("Could not write stored procedure data for %s.%s", database,row[1] ); + errors++; + return; + } + g_string_set_size(statement, 0); + query= g_strdup_printf("SHOW CREATE FUNCTION `%s`.`%s`", database, row[1]); + mysql_query(conn, query); + result2= mysql_store_result(conn); + row2 = mysql_fetch_row(result2); + g_string_printf(statement,"%s",row2[2]); + splited_st = g_strsplit(statement->str,";\n",0); + g_string_printf(statement, "%s", g_strjoinv("; \n", splited_st)); + g_string_append(statement, ";\n"); + restore_charset(statement); + if (!write_data((FILE *)outfile,statement)) { + g_critical("Could not write function data for %s.%s", database,row[1] ); + errors++; + return; + } + g_string_set_size(statement, 0); + } + + // get sp + query= g_strdup_printf("SHOW PROCEDURE STATUS WHERE Db = '%s'", database); + if 
(mysql_query(conn, query) || !(result= mysql_store_result(conn))) { + if(success_on_1146 && mysql_errno(conn) == 1146){ + g_warning("Error dumping stored procedures from %s: %s", database, mysql_error(conn)); + }else{ + g_critical("Error dumping stored procedures from %s: %s", database, mysql_error(conn)); + errors++; + } + g_free(query); + return; + } + + while((row = mysql_fetch_row(result))){ + set_charset(statement, row[8], row[9]); + g_string_append_printf(statement,"DROP PROCEDURE IF EXISTS `%s`;\n",row[1]); + if (!write_data((FILE *)outfile,statement)) { + g_critical("Could not write stored procedure data for %s.%s", database,row[1] ); + errors++; + return; + } + g_string_set_size(statement, 0); + query= g_strdup_printf("SHOW CREATE PROCEDURE `%s`.`%s`", database, row[1]); + mysql_query(conn, query); + result2= mysql_store_result(conn); + row2 = mysql_fetch_row(result2); + g_string_printf(statement,"%s",row2[2]); + splited_st = g_strsplit(statement->str,";\n",0); + g_string_printf(statement, "%s", g_strjoinv("; \n", splited_st)); + g_string_append(statement, ";\n"); + restore_charset(statement); + if (!write_data((FILE *)outfile,statement)) { + g_critical("Could not write stored procedure data for %s.%s", database,row[1] ); + errors++; + return; + } + g_string_set_size(statement, 0); + } + } + + // get events + if(dump_events){ + query= g_strdup_printf("SHOW EVENTS FROM `%s`", database); + if (mysql_query(conn, query) || !(result= mysql_store_result(conn))) { + if(success_on_1146 && mysql_errno(conn) == 1146){ + g_warning("Error dumping events from %s: %s", database, mysql_error(conn)); + }else{ + g_critical("Error dumping events from %s: %s", database, mysql_error(conn)); + errors++; + } + g_free(query); + return; + } + + while((row = mysql_fetch_row(result))){ + set_charset(statement, row[12], row[13]); + g_string_append_printf(statement,"DROP EVENT IF EXISTS `%s`;\n",row[1]); + if (!write_data((FILE *)outfile,statement)) { + g_critical("Could not write 
stored procedure data for %s.%s", database,row[1] ); + errors++; + return; + } + query= g_strdup_printf("SHOW CREATE EVENT `%s`.`%s`", database, row[1]); + mysql_query(conn, query); + result2= mysql_store_result(conn); + //DROP EVENT IF EXISTS event_name + row2 = mysql_fetch_row(result2); + g_string_printf(statement,"%s",row2[3]); + splited_st = g_strsplit(statement->str,";\n",0); + g_string_printf(statement, "%s", g_strjoinv("; \n", splited_st)); + g_string_append(statement, ";\n"); + restore_charset(statement); + if (!write_data((FILE *)outfile,statement)) { + g_critical("Could not write event data for %s.%s", database,row[1] ); + errors++; + return; + } + g_string_set_size(statement, 0); + } + } + + g_free(query); + + if (!compress_output) + fclose((FILE *)outfile); + else + gzclose((gzFile)outfile); + + g_string_free(statement, TRUE); + g_strfreev(splited_st); + if (result) + mysql_free_result(result); + if (result2) + mysql_free_result(result2); + + return; + +} +void dump_triggers_data(MYSQL *conn, char *database, char *table, char *filename){ + void *outfile; + char *query = NULL; + MYSQL_RES *result = NULL; + MYSQL_RES *result2 = NULL; + MYSQL_ROW row; + MYSQL_ROW row2; + gchar **splited_st= NULL; + + if (!compress_output) + outfile= g_fopen(filename, "w"); + else + outfile= (void*) gzopen(filename, "w"); + + if (!outfile) { + g_critical("Error: DB: %s Could not create output file %s (%d)", database, filename, errno); + errors++; + return; + } + + GString* statement = g_string_sized_new(statement_size); + + // get triggers + query= g_strdup_printf("SHOW TRIGGERS FROM `%s` LIKE '%s'", database, table); + if (mysql_query(conn, query) || !(result= mysql_store_result(conn))) { + if(success_on_1146 && mysql_errno(conn) == 1146){ + g_warning("Error dumping triggers (%s.%s): %s", database, table, mysql_error(conn)); + }else{ + g_critical("Error dumping triggers (%s.%s): %s", database, table, mysql_error(conn)); + errors++; + } + g_free(query); + return; + } + + 
while ((row = mysql_fetch_row(result))) { + set_charset(statement, row[8], row[9]); + if (!write_data((FILE *)outfile,statement)) { + g_critical("Could not write triggers data for %s.%s", database, table); + errors++; + return; + } + g_string_set_size(statement, 0); + query= g_strdup_printf("SHOW CREATE TRIGGER `%s`.`%s`", database, row[0]); + mysql_query(conn, query); + result2= mysql_store_result(conn); + row2 = mysql_fetch_row(result2); + g_string_append_printf(statement,"%s",row2[2]); + splited_st = g_strsplit(statement->str,";\n",0); + g_string_printf(statement, "%s", g_strjoinv("; \n", splited_st)); + g_string_append(statement, ";\n"); + restore_charset(statement); + if (!write_data((FILE *)outfile,statement)) { + g_critical("Could not write triggers data for %s.%s", database, table); + errors++; + return; + } + g_string_set_size(statement, 0); + } + + g_free(query); + + if (!compress_output) + fclose((FILE *)outfile); + else + gzclose((gzFile)outfile); + + g_string_free(statement, TRUE); + g_strfreev(splited_st); + if (result) + mysql_free_result(result); + if (result2) + mysql_free_result(result2); + + return; +} +void dump_schema_data(MYSQL *conn, char *database, char *table, char *filename) { + void *outfile; + char *query = NULL; + MYSQL_RES *result = NULL; + MYSQL_ROW row; + + if (!compress_output) + outfile= g_fopen(filename, "w"); + else + outfile= (void*) gzopen(filename, "w"); + + if (!outfile) { + g_critical("Error: DB: %s Could not create output file %s (%d)", database, filename, errno); + errors++; + return; + } + + GString* statement = g_string_sized_new(statement_size); + + if (detected_server == SERVER_TYPE_MYSQL) { + g_string_printf(statement,"/*!40101 SET NAMES binary*/;\n"); + g_string_append(statement,"/*!40014 SET FOREIGN_KEY_CHECKS=0*/;\n\n"); + } else { + g_string_printf(statement, "SET FOREIGN_KEY_CHECKS=0;\n"); + } + + if (!write_data((FILE *)outfile,statement)) { + g_critical("Could not write schema data for %s.%s", database, table); 
+ errors++; + return; + } + + query= g_strdup_printf("SHOW CREATE TABLE `%s`.`%s`", database, table); + if (mysql_query(conn, query) || !(result= mysql_use_result(conn))) { + if(success_on_1146 && mysql_errno(conn) == 1146){ + g_warning("Error dumping schemas (%s.%s): %s", database, table, mysql_error(conn)); + }else{ + g_critical("Error dumping schemas (%s.%s): %s", database, table, mysql_error(conn)); + errors++; + } + g_free(query); + return; + } + + + g_string_set_size(statement, 0); + + /* There should never be more than one row */ + row = mysql_fetch_row(result); + g_string_append(statement, row[1]); + g_string_append(statement, ";\n"); + if (!write_data((FILE *)outfile, statement)) { + g_critical("Could not write schema for %s.%s", database, table); + errors++; + } + g_free(query); + + if (!compress_output) + fclose((FILE *)outfile); + else + gzclose((gzFile)outfile); + + + g_string_free(statement, TRUE); + if (result) + mysql_free_result(result); + + return; +} + +void dump_view_data(MYSQL *conn, char *database, char *table, char *filename, char *filename2) { + void *outfile, *outfile2; + char *query = NULL; + MYSQL_RES *result = NULL; + MYSQL_ROW row; + GString* statement = g_string_sized_new(statement_size); + + mysql_select_db(conn,database); + + if (!compress_output){ + outfile= g_fopen(filename, "w"); + outfile2= g_fopen(filename2, "w"); + }else{ + outfile= (void*) gzopen(filename, "w"); + outfile2= (void*) gzopen(filename2, "w"); + } + + if (!outfile || !outfile2) { + g_critical("Error: DB: %s Could not create output file (%d)", database, errno); + errors++; + return; + } + + if (detected_server == SERVER_TYPE_MYSQL) { + g_string_printf(statement,"/*!40101 SET NAMES binary*/;\n"); + } + + if (!write_data((FILE *)outfile,statement)) { + g_critical("Could not write schema data for %s.%s", database, table); + errors++; + return; + } + + g_string_append_printf(statement, "DROP TABLE IF EXISTS `%s`;\n", table); + g_string_append_printf(statement, "DROP 
VIEW IF EXISTS `%s`;\n", table); + + if (!write_data((FILE *)outfile2,statement)) { + g_critical("Could not write schema data for %s.%s", database, table); + errors++; + return; + } + + //we create myisam tables as workaround + //for view dependencies + query= g_strdup_printf("SHOW FIELDS FROM `%s`.`%s`", database, table); + if (mysql_query(conn, query) || !(result= mysql_use_result(conn))) { + if(success_on_1146 && mysql_errno(conn) == 1146){ + g_warning("Error dumping schemas (%s.%s): %s", database, table, mysql_error(conn)); + }else{ + g_critical("Error dumping schemas (%s.%s): %s", database, table, mysql_error(conn)); + errors++; + } + g_free(query); + return; + } + g_string_set_size(statement, 0); + g_string_append_printf(statement, "CREATE TABLE `%s`(\n", table); + row = mysql_fetch_row(result); + g_string_append_printf(statement, "`%s` int", row[0]); + while ((row = mysql_fetch_row(result))) { + g_string_append(statement, ",\n"); + g_string_append_printf(statement, "`%s` int", row[0]); + } + g_string_append(statement, "\n)ENGINE=MyISAM;\n"); + + if (!write_data((FILE *)outfile, statement)) { + g_critical("Could not write view schema for %s.%s", database, table); + errors++; + } + + //real view + query= g_strdup_printf("SHOW CREATE VIEW `%s`.`%s`", database, table); + if (mysql_query(conn, query) || !(result= mysql_use_result(conn))) { + if(success_on_1146 && mysql_errno(conn) == 1146){ + g_warning("Error dumping schemas (%s.%s): %s", database, table, mysql_error(conn)); + }else{ + g_critical("Error dumping schemas (%s.%s): %s", database, table, mysql_error(conn)); + errors++; + } + g_free(query); + return; + } + g_string_set_size(statement, 0); + + /* There should never be more than one row */ + row = mysql_fetch_row(result); + set_charset(statement, row[2], row[3]); + g_string_append(statement, row[1]); + g_string_append(statement, ";\n"); + restore_charset(statement); + if (!write_data((FILE *)outfile2, statement)) { + g_critical("Could not write schema 
for %s.%s", database, table); + errors++; + } + g_free(query); + + if (!compress_output){ + fclose((FILE *)outfile); + fclose((FILE *)outfile2); + }else{ + gzclose((gzFile)outfile); + gzclose((gzFile)outfile2); + } + + g_string_free(statement, TRUE); + if (result) + mysql_free_result(result); + + return; +} + +void dump_table_data_file(MYSQL *conn, char *database, char *table, char *where, char *filename) { + void *outfile; + + if (!compress_output) + outfile = g_fopen(filename, "w"); + else + outfile = (void*) gzopen(filename, "w"); + + if (!outfile) { + g_critical("Error: DB: %s TABLE: %s Could not create output file %s (%d)", database, table, filename, errno); + errors++; + return; + } + guint64 rows_count = dump_table_data(conn, (FILE *)outfile, database, table, where, filename); + + if (!rows_count) + g_message("Empty table %s.%s", database,table); +} + +void dump_schema(MYSQL *conn, char *database, char *table, struct configuration *conf) { + struct job *j = g_new0(struct job,1); + struct schema_job *sj = g_new0(struct schema_job,1); + j->job_data=(void*) sj; + sj->database=g_strdup(database); + sj->table=g_strdup(table); + j->conf=conf; + j->type=JOB_SCHEMA; + if (daemon_mode) + sj->filename = g_strdup_printf("%s/%d/%s.%s-schema.sql%s", output_directory, dump_number, database, table, (compress_output?".gz":"")); + else + sj->filename = g_strdup_printf("%s/%s.%s-schema.sql%s", output_directory, database, table, (compress_output?".gz":"")); + g_async_queue_push(conf->queue,j); + + if(dump_triggers){ + char *query = NULL; + MYSQL_RES *result = NULL; + + query= g_strdup_printf("SHOW TRIGGERS FROM `%s` LIKE '%s'", database, table); + if (mysql_query(conn, query) || !(result= mysql_store_result(conn))) { + g_critical("Error Checking triggers for %s.%s. 
Err: %s", database, table, mysql_error(conn)); + errors++; + }else{ + if(mysql_num_rows(result)){ + struct job *t = g_new0(struct job,1); + struct schema_job *st = g_new0(struct schema_job,1); + t->job_data=(void*) st; + st->database=g_strdup(database); + st->table=g_strdup(table); + t->conf=conf; + t->type=JOB_TRIGGERS; + if (daemon_mode) + st->filename = g_strdup_printf("%s/%d/%s.%s-schema-triggers.sql%s", output_directory, dump_number, database, table, (compress_output?".gz":"")); + else + st->filename = g_strdup_printf("%s/%s.%s-schema-triggers.sql%s", output_directory, database, table, (compress_output?".gz":"")); + g_async_queue_push(conf->queue,t); + } + } + g_free(query); + if (result) { + mysql_free_result(result); + } + } + return; +} + +void dump_view(char *database, char *table, struct configuration *conf) { + struct job *j = g_new0(struct job,1); + struct view_job *vj = g_new0(struct view_job,1); + j->job_data=(void*) vj; + vj->database=g_strdup(database); + vj->table=g_strdup(table); + j->conf=conf; + j->type=JOB_VIEW; + if (daemon_mode){ + vj->filename = g_strdup_printf("%s/%d/%s.%s-schema.sql%s", output_directory, dump_number, database, table, (compress_output?".gz":"")); + vj->filename2 = g_strdup_printf("%s/%d/%s.%s-schema-view.sql%s", output_directory, dump_number, database, table, (compress_output?".gz":"")); + }else{ + vj->filename = g_strdup_printf("%s/%s.%s-schema.sql%s", output_directory, database, table, (compress_output?".gz":"")); + vj->filename2 = g_strdup_printf("%s/%s.%s-schema-view.sql%s", output_directory, database, table, (compress_output?".gz":"")); + } + g_async_queue_push(conf->queue,j); + return; +} + +void dump_schema_post(char *database, struct configuration *conf) { + struct job *j = g_new0(struct job,1); + struct schema_post_job *sp = g_new0(struct schema_post_job,1); + j->job_data=(void*) sp; + sp->database=g_strdup(database); + j->conf=conf; + j->type=JOB_SCHEMA_POST; + if (daemon_mode){ + sp->filename = 
g_strdup_printf("%s/%d/%s-schema-post.sql%s", output_directory, dump_number, database, (compress_output?".gz":"")); + }else{ + sp->filename = g_strdup_printf("%s/%s-schema-post.sql%s", output_directory, database, (compress_output?".gz":"")); + } + g_async_queue_push(conf->queue,j); + return; +} + +void dump_table(MYSQL *conn, char *database, char *table, struct configuration *conf, gboolean is_innodb) { + + GList * chunks = NULL; + if (rows_per_file) + chunks = get_chunks_for_table(conn, database, table, conf); + + + if (chunks) { + int nchunk=0; + for (chunks = g_list_first(chunks); chunks; chunks=g_list_next(chunks)) { + struct job *j = g_new0(struct job,1); + struct table_job *tj = g_new0(struct table_job,1); + j->job_data=(void*) tj; + tj->database=g_strdup(database); + tj->table=g_strdup(table); + j->conf=conf; + j->type= is_innodb ? JOB_DUMP : JOB_DUMP_NON_INNODB; + if (daemon_mode) + tj->filename=g_strdup_printf("%s/%d/%s.%s.%05d.sql%s", output_directory, dump_number, database, table, nchunk,(compress_output?".gz":"")); + else + tj->filename=g_strdup_printf("%s/%s.%s.%05d.sql%s", output_directory, database, table, nchunk,(compress_output?".gz":"")); + tj->where=(char *)chunks->data; + if (!is_innodb && nchunk) + g_atomic_int_inc(&non_innodb_table_counter); + g_async_queue_push(conf->queue,j); + nchunk++; + } + g_list_free(g_list_first(chunks)); + } else { + struct job *j = g_new0(struct job,1); + struct table_job *tj = g_new0(struct table_job,1); + j->job_data=(void*) tj; + tj->database=g_strdup(database); + tj->table=g_strdup(table); + j->conf=conf; + j->type= is_innodb ? 
JOB_DUMP : JOB_DUMP_NON_INNODB; + if (daemon_mode) + tj->filename = g_strdup_printf("%s/%d/%s.%s%s.sql%s", output_directory, dump_number, database, table,(chunk_filesize?".00001":""),(compress_output?".gz":"")); + else + tj->filename = g_strdup_printf("%s/%s.%s%s.sql%s", output_directory, database, table,(chunk_filesize?".00001":""),(compress_output?".gz":"")); + g_async_queue_push(conf->queue,j); + return; + } +} + +void dump_tables(MYSQL *conn, GList *noninnodb_tables_list, struct configuration *conf){ + struct db_table* dbt; + GList * chunks = NULL; + + struct job *j = g_new0(struct job,1); + struct tables_job *tjs = g_new0(struct tables_job,1); + j->conf=conf; + j->type=JOB_LOCK_DUMP_NON_INNODB; + j->job_data=(void*) tjs; + + for (noninnodb_tables_list= g_list_first(noninnodb_tables_list); noninnodb_tables_list; noninnodb_tables_list= g_list_next(noninnodb_tables_list)) { + dbt = (struct db_table*) noninnodb_tables_list->data; + + if (rows_per_file) + chunks = get_chunks_for_table(conn, dbt->database, dbt->table, conf); + + if(chunks){ + int nchunk=0; + for (chunks = g_list_first(chunks); chunks; chunks=g_list_next(chunks)) { + struct table_job *tj = g_new0(struct table_job,1); + tj->database = g_strdup_printf("%s",dbt->database); + tj->table = g_strdup_printf("%s",dbt->table); + if (daemon_mode) + tj->filename=g_strdup_printf("%s/%d/%s.%s.%05d.sql%s", output_directory, dump_number, dbt->database, dbt->table, nchunk,(compress_output?".gz":"")); + else + tj->filename=g_strdup_printf("%s/%s.%s.%05d.sql%s", output_directory, dbt->database, dbt->table, nchunk,(compress_output?".gz":"")); + tj->where=(char *)chunks->data; + tjs->table_job_list= g_list_append(tjs->table_job_list, tj); + nchunk++; + } + }else{ + struct table_job *tj = g_new0(struct table_job,1); + tj->database = g_strdup_printf("%s",dbt->database); + tj->table = g_strdup_printf("%s",dbt->table); + if (daemon_mode) + tj->filename = g_strdup_printf("%s/%d/%s.%s%s.sql%s", output_directory, dump_number, 
dbt->database, dbt->table,(chunk_filesize?".00001":""),(compress_output?".gz":"")); + else + tj->filename = g_strdup_printf("%s/%s.%s%s.sql%s", output_directory, dbt->database, dbt->table,(chunk_filesize?".00001":""),(compress_output?".gz":"")); + tj->where = NULL; + tjs->table_job_list= g_list_append(tjs->table_job_list, tj); + } + } + g_async_queue_push(conf->queue_less_locking,j); +} + +/* Do actual data chunk reading/writing magic */ +guint64 dump_table_data(MYSQL * conn, FILE *file, char *database, char *table, char *where, char *filename) +{ + guint i; + guint fn = 1; + guint st_in_file = 0; + guint num_fields = 0; + guint64 num_rows = 0; + guint64 num_rows_st = 0; + MYSQL_RES *result = NULL; + char *query = NULL; + gchar *fcfile = NULL; + gchar* filename_prefix = NULL; + + fcfile = g_strdup (filename); + + if(chunk_filesize){ + gchar** split_filename= g_strsplit(filename, ".00001.sql", 0); + filename_prefix= split_filename[0]; + g_free(split_filename); + } + + + /* Ghm, not sure if this should be statement_size - but default isn't too big for now */ + GString* statement = g_string_sized_new(statement_size); + GString* statement_row = g_string_sized_new(0); + + /* Poor man's database code */ + query = g_strdup_printf("SELECT %s * FROM `%s`.`%s` %s %s", (detected_server == SERVER_TYPE_MYSQL) ? 
"/*!40001 SQL_NO_CACHE */" : "", database, table, where?"WHERE":"",where?where:""); + if (mysql_query(conn, query) || !(result=mysql_use_result(conn))) { + //ERROR 1146 + if(success_on_1146 && mysql_errno(conn) == 1146){ + g_warning("Error dumping table (%s.%s) data: %s ",database, table, mysql_error(conn)); + }else{ + g_critical("Error dumping table (%s.%s) data: %s ",database, table, mysql_error(conn)); + errors++; + } + g_free(query); + return num_rows; + } + + num_fields = mysql_num_fields(result); + MYSQL_FIELD *fields = mysql_fetch_fields(result); + + /* Buffer for escaping field values */ + GString *escaped = g_string_sized_new(3000); + + MYSQL_ROW row; + + g_string_set_size(statement,0); + + /* Poor man's data dump code */ + while ((row = mysql_fetch_row(result))) { + gulong *lengths = mysql_fetch_lengths(result); + num_rows++; + + if (!statement->len){ + if(!st_in_file){ + if (detected_server == SERVER_TYPE_MYSQL) { + g_string_printf(statement,"/*!40101 SET NAMES binary*/;\n"); + g_string_append(statement,"/*!40014 SET FOREIGN_KEY_CHECKS=0*/;\n"); + if (!skip_tz) { + g_string_append(statement,"/*!40103 SET TIME_ZONE='+00:00' */;\n"); + } + } else { + g_string_printf(statement,"SET FOREIGN_KEY_CHECKS=0;\n"); + } + + if (!write_data(file,statement)) { + g_critical("Could not write out data for %s.%s", database, table); + return num_rows; + } + } + g_string_printf(statement, "INSERT INTO `%s` VALUES", table); + num_rows_st = 0; + } + + if (statement_row->len) { + g_string_append(statement, statement_row->str); + g_string_set_size(statement_row,0); + num_rows_st++; + } + + g_string_append(statement_row, "\n("); + + for (i = 0; i < num_fields; i++) { + /* Don't escape safe formats, saves some time */ + if (!row[i]) { + g_string_append(statement_row, "NULL"); + } else if (fields[i].flags & NUM_FLAG) { + g_string_append(statement_row, row[i]); + } else { + /* We reuse buffers for string escaping, growing is expensive just at the beginning */ + 
g_string_set_size(escaped, lengths[i]*2+1); + mysql_real_escape_string(conn, escaped->str, row[i], lengths[i]); + g_string_append_c(statement_row,'\"'); + g_string_append(statement_row,escaped->str); + g_string_append_c(statement_row,'\"'); + } + if (i < num_fields - 1) { + g_string_append_c(statement_row,','); + } else { + g_string_append_c(statement_row,')'); + /* INSERT statement is closed before over limit */ + if(statement->len+statement_row->len+1 > statement_size) { + if(num_rows_st == 0){ + g_string_append(statement, statement_row->str); + g_string_set_size(statement_row,0); + g_warning("Row bigger than statement_size for %s.%s", database, table); + } + g_string_append(statement,";\n"); + + if (!write_data(file,statement)) { + g_critical("Could not write out data for %s.%s", database, table); + goto cleanup; + }else{ + st_in_file++; + if(chunk_filesize && st_in_file*(guint)ceil((float)statement_size/1024/1024) > chunk_filesize){ + fn++; + fcfile = g_strdup_printf("%s.%05d.sql%s", filename_prefix,fn,(compress_output?".gz":"")); + if (!compress_output){ + fclose((FILE *)file); + file = g_fopen(fcfile, "w"); + } else { + gzclose((gzFile)file); + file = (void*) gzopen(fcfile, "w"); + } + st_in_file = 0; + } + } + g_string_set_size(statement,0); + } else { + if(num_rows_st) + g_string_append_c(statement,','); + g_string_append(statement, statement_row->str); + num_rows_st++; + g_string_set_size(statement_row,0); + } + } + } + } + if (mysql_errno(conn)) { + g_critical("Could not read data from %s.%s: %s", database, table, mysql_error(conn)); + errors++; + } + + if (statement_row->len > 0) { + /* this last row has not been written out */ + if (statement->len > 0) { + /* strange, should not happen */ + g_string_append(statement, statement_row->str); + } + else { + g_string_printf(statement, "INSERT INTO `%s` VALUES", table); + g_string_append(statement, statement_row->str); + } + } + + if (statement->len > 0) { + g_string_append(statement,";\n"); + if 
(!write_data(file,statement)) { + g_critical("Could not write out closing newline for %s.%s, now this is sad!", database, table); + goto cleanup; + } + st_in_file++; + } + +cleanup: + g_free(query); + + g_string_free(escaped,TRUE); + g_string_free(statement,TRUE); + + if (result) { + mysql_free_result(result); + } + + if (!compress_output){ + fclose((FILE *)file); + } else { + gzclose((gzFile)file); + } + + if (!st_in_file && !build_empty_files) { + // dropping the useless file + if (remove(fcfile)) { + g_warning("Failed to remove empty file : %s\n", fcfile); + } + }else if(chunk_filesize && fn == 1){ + fcfile = g_strdup_printf("%s.sql%s", filename_prefix,(compress_output?".gz":"")); + g_rename(filename, fcfile); + } + + g_free(filename_prefix); + g_free(fcfile); + + return num_rows; +} + +gboolean write_data(FILE* file,GString * data) { + size_t written= 0; + ssize_t r= 0; + + while (written < data->len) { + if (!compress_output) + r = write(fileno(file), data->str + written, data->len); + else + r = gzwrite((gzFile)file, data->str + written, data->len); + + if (r < 0) { + g_critical("Couldn't write data to a file: %s", strerror(errno)); + errors++; + return FALSE; + } + written += r; + } + + return TRUE; +} + +void write_log_file(const gchar *log_domain, GLogLevelFlags log_level, const gchar *message, gpointer user_data) { + (void) log_domain; + (void) user_data; + + gchar date[20]; + time_t rawtime; + struct tm timeinfo; + + time(&rawtime); + localtime_r(&rawtime, &timeinfo); + strftime(date, 20, "%Y-%m-%d %H:%M:%S", &timeinfo); + + GString* message_out = g_string_new(date); + if (log_level & G_LOG_LEVEL_DEBUG) { + g_string_append(message_out, " [DEBUG] - "); + } else if ((log_level & G_LOG_LEVEL_INFO) + || (log_level & G_LOG_LEVEL_MESSAGE)) { + g_string_append(message_out, " [INFO] - "); + } else if (log_level & G_LOG_LEVEL_WARNING) { + g_string_append(message_out, " [WARNING] - "); + } else if ((log_level & G_LOG_LEVEL_ERROR) + || (log_level & 
G_LOG_LEVEL_CRITICAL)) { + g_string_append(message_out, " [ERROR] - "); + } + + g_string_append_printf(message_out, "%s\n", message); + if (write(fileno(logoutfile), message_out->str, message_out->len) <= 0) { + fprintf(stderr, "Cannot write to log file with error %d. Exiting...", errno); + } + g_string_free(message_out, TRUE); +} diff --git a/mydumper.h b/mydumper.h new file mode 100644 index 0000000..69d5390 --- /dev/null +++ b/mydumper.h @@ -0,0 +1,100 @@ +/* + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . 
+ + Authors: Domas Mituzas, Facebook ( domas at fb dot com ) + Mark Leith, Oracle Corporation (mark dot leith at oracle dot com) + Andrew Hutchings, SkySQL (andrew at skysql dot com) + Max Bubenick, Percona RDBA (max dot bubenick at percona dot com) + +*/ + +#ifndef _mydumper_h +#define _mydumper_h + +enum job_type { JOB_SHUTDOWN, JOB_RESTORE, JOB_DUMP, JOB_DUMP_NON_INNODB, JOB_SCHEMA, JOB_VIEW, JOB_TRIGGERS, JOB_SCHEMA_POST, JOB_BINLOG, JOB_LOCK_DUMP_NON_INNODB }; + +struct configuration { + char use_any_index; + GAsyncQueue* queue; + GAsyncQueue* queue_less_locking; + GAsyncQueue* ready; + GAsyncQueue* ready_less_locking; + GAsyncQueue* unlock_tables; + GMutex* mutex; + int done; +}; + +struct thread_data { + struct configuration *conf; + guint thread_id; +}; + +struct job { + enum job_type type; + void *job_data; + struct configuration *conf; +}; + +struct table_job { + char *database; + char *table; + char *filename; + char *where; +}; + +struct tables_job { + GList* table_job_list; +}; + +struct schema_job { + char *database; + char *table; + char *filename; +}; + +struct view_job { + char *database; + char *table; + char *filename; + char *filename2; +}; + +struct schema_post_job { + char *database; + char *filename; +}; + +struct restore_job { + char *database; + char *table; + char *filename; +}; + +struct binlog_job { + char *filename; + guint64 start_position; + guint64 stop_position; +}; + +struct db_table { + char* database; + char* table; + guint64 datalength; +}; + +struct schema_post { + char* database; +}; + +#endif diff --git a/myloader.c b/myloader.c new file mode 100644 index 0000000..106a400 --- /dev/null +++ b/myloader.c @@ -0,0 +1,577 @@ +/* + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + + Authors: Andrew Hutchings, SkySQL (andrew at skysql dot com) +*/ + +#define _LARGEFILE64_SOURCE +#define _FILE_OFFSET_BITS 64 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "common.h" +#include "myloader.h" +#include "config.h" + +guint commit_count= 1000; +gchar *directory= NULL; +gboolean overwrite_tables= FALSE; +gboolean enable_binlog= FALSE; +gchar *source_db= NULL; +static GMutex *init_mutex= NULL; + +guint errors= 0; + +gboolean read_data(FILE *file, gboolean is_compressed, GString *data, gboolean *eof); +void restore_data(MYSQL *conn, char *database, char *table, const char *filename, gboolean is_schema, gboolean need_use); +void *process_queue(struct thread_data *td); +void add_table(const gchar* filename, struct configuration *conf); +void add_schema(const gchar* filename, MYSQL *conn); +void restore_databases(struct configuration *conf, MYSQL *conn); +void restore_schema_view(MYSQL *conn); +void restore_schema_triggers(MYSQL *conn); +void restore_schema_post(MYSQL *conn); +void no_log(const gchar *log_domain, GLogLevelFlags log_level, const gchar *message, gpointer user_data); +void set_verbose(guint verbosity); +void create_database(MYSQL *conn, gchar *database); + +static GOptionEntry entries[] = +{ + { "directory", 'd', 0, G_OPTION_ARG_STRING, &directory, "Directory of the dump to import", NULL }, + { "queries-per-transaction", 'q', 0, G_OPTION_ARG_INT, &commit_count, "Number of queries per transaction, default 1000", NULL }, + { "overwrite-tables", 'o', 0, G_OPTION_ARG_NONE, &overwrite_tables, "Drop tables if they already exist", NULL 
}, + { "database", 'B', 0, G_OPTION_ARG_STRING, &db, "An alternative database to restore into", NULL }, + { "source-db", 's', 0, G_OPTION_ARG_STRING, &source_db, "Database to restore", NULL }, + { "enable-binlog", 'e', 0, G_OPTION_ARG_NONE, &enable_binlog, "Enable binary logging of the restore data", NULL }, + { NULL, 0, 0, G_OPTION_ARG_NONE, NULL, NULL, NULL } +}; + +void no_log(const gchar *log_domain, GLogLevelFlags log_level, const gchar *message, gpointer user_data) { + (void) log_domain; + (void) log_level; + (void) message; + (void) user_data; +} + +void set_verbose(guint verbosity) { + switch (verbosity) { + case 0: + g_log_set_handler(NULL, (GLogLevelFlags)(G_LOG_LEVEL_MASK), no_log, NULL); + break; + case 1: + g_log_set_handler(NULL, (GLogLevelFlags)(G_LOG_LEVEL_WARNING | G_LOG_LEVEL_MESSAGE), no_log, NULL); + break; + case 2: + g_log_set_handler(NULL, (GLogLevelFlags)(G_LOG_LEVEL_MESSAGE), no_log, NULL); + break; + default: + break; + } +} + +int main(int argc, char *argv[]) { + struct configuration conf= { NULL, NULL, NULL, 0 }; + + GError *error= NULL; + GOptionContext *context; + + g_thread_init(NULL); + + init_mutex= g_mutex_new(); + + if(db == NULL && source_db != NULL){ + db = g_strdup(source_db); + } + + context= g_option_context_new("multi-threaded MySQL loader"); + GOptionGroup *main_group= g_option_group_new("main", "Main Options", "Main Options", NULL, NULL); + g_option_group_add_entries(main_group, entries); + g_option_group_add_entries(main_group, common_entries); + g_option_context_set_main_group(context, main_group); + if (!g_option_context_parse(context, &argc, &argv, &error)) { + g_print("option parsing failed: %s, try --help\n", error->message); + exit(EXIT_FAILURE); + } + g_option_context_free(context); + + if (program_version) { + g_print("myloader %s, built against MySQL %s\n", VERSION, MYSQL_SERVER_VERSION); + exit(EXIT_SUCCESS); + } + + set_verbose(verbose); + + if (!directory) { + g_critical("a directory needs to be specified, see 
--help\n"); + exit(EXIT_FAILURE); + } else { + char *p= g_strdup_printf("%s/metadata", directory); + if (!g_file_test(p, G_FILE_TEST_EXISTS)) { + g_critical("the specified directory is not a mydumper backup\n"); + exit(EXIT_FAILURE); + } + } + + MYSQL *conn; + conn= mysql_init(NULL); + mysql_options(conn, MYSQL_READ_DEFAULT_GROUP, "myloader"); + + if (!mysql_real_connect(conn, hostname, username, password, NULL, port, socket_path, 0)) { + g_critical("Error connection to database: %s", mysql_error(conn)); + exit(EXIT_FAILURE); + } + + if (mysql_query(conn, "SET SESSION wait_timeout = 2147483")){ + g_warning("Failed to increase wait_timeout: %s", mysql_error(conn)); + } + + if (!enable_binlog) + mysql_query(conn, "SET SQL_LOG_BIN=0"); + + mysql_query(conn, "/*!40014 SET FOREIGN_KEY_CHECKS=0*/"); + conf.queue= g_async_queue_new(); + conf.ready= g_async_queue_new(); + + guint n; + GThread **threads= g_new(GThread*, num_threads); + struct thread_data *td= g_new(struct thread_data, num_threads); + for (n= 0; n < num_threads; n++) { + td[n].conf= &conf; + td[n].thread_id= n+1; + threads[n]= g_thread_create((GThreadFunc)process_queue, &td[n], TRUE, NULL); + g_async_queue_pop(conf.ready); + } + g_async_queue_unref(conf.ready); + + g_message("%d threads created", num_threads); + + restore_databases(&conf, conn); + + for (n= 0; n < num_threads; n++) { + struct job *j= g_new0(struct job, 1); + j->type = JOB_SHUTDOWN; + g_async_queue_push(conf.queue, j); + } + + for (n= 0; n < num_threads; n++) { + g_thread_join(threads[n]); + } + + restore_schema_post(conn); + + restore_schema_view(conn); + + restore_schema_triggers(conn); + + g_async_queue_unref(conf.queue); + mysql_close(conn); + mysql_thread_end(); + mysql_library_end(); + g_free(directory); + g_free(td); + g_free(threads); + + return errors ? 
EXIT_FAILURE : EXIT_SUCCESS; +} + +void restore_databases(struct configuration *conf, MYSQL *conn) { + GError *error= NULL; + GDir* dir= g_dir_open(directory, 0, &error); + + if (error) { + g_critical("cannot open directory %s, %s\n", directory, error->message); + errors++; + return; + } + + const gchar* filename= NULL; + + while((filename= g_dir_read_name(dir))) { + if (!source_db || g_str_has_prefix(filename, g_strdup_printf("%s.", source_db))){ + if (g_strrstr(filename, "-schema.sql")) { + add_schema(filename, conn); + } + } + } + + g_dir_rewind(dir); + + while((filename= g_dir_read_name(dir))) { + if (!source_db || g_str_has_prefix(filename, g_strdup_printf("%s.", source_db))){ + if (!g_strrstr(filename, "-schema.sql") + && !g_strrstr(filename, "-schema-view.sql") + && !g_strrstr(filename, "-schema-triggers.sql") + && !g_strrstr(filename, "-schema-post.sql") + && !g_strrstr(filename, "-schema-create.sql") + && g_strrstr(filename, ".sql")) { + add_table(filename, conf); + } + } + } + + g_dir_close(dir); +} + + +void restore_schema_view(MYSQL *conn){ + GError *error= NULL; + GDir* dir= g_dir_open(directory, 0, &error); + + if (error) { + g_critical("cannot open directory %s, %s\n", directory, error->message); + errors++; + return; + } + + const gchar* filename= NULL; + + while((filename= g_dir_read_name(dir))) { + if (!source_db || g_str_has_prefix(filename, source_db)){ + if (g_strrstr(filename, "-schema-view.sql")) { + add_schema(filename, conn); + } + } + } + + g_dir_close(dir); +} + +void restore_schema_triggers(MYSQL *conn){ + GError *error= NULL; + GDir* dir= g_dir_open(directory, 0, &error); + gchar** split_file= NULL; + gchar* database=NULL; + gchar** split_table= NULL; + gchar* table= NULL; + + if (error) { + g_critical("cannot open directory %s, %s\n", directory, error->message); + errors++; + return; + } + + const gchar* filename= NULL; + + while((filename= g_dir_read_name(dir))) { + if (!source_db || g_str_has_prefix(filename, source_db)){ + if 
(g_strrstr(filename, "-schema-triggers.sql")) { + split_file= g_strsplit(filename, ".", 0); + database= split_file[0]; + split_table= g_strsplit(split_file[1], "-schema", 0); + table= split_table[0]; + g_message("Restoring triggers for `%s`.`%s`", db ? db : database, table); + restore_data(conn, database, table, filename, TRUE, TRUE); + } + } + } + + g_strfreev(split_table); + g_strfreev(split_file); + g_dir_close(dir); +} + +void restore_schema_post(MYSQL *conn){ + GError *error= NULL; + GDir* dir= g_dir_open(directory, 0, &error); + gchar** split_file= NULL; + gchar* database=NULL; + //gchar* table=NULL; + + + if (error) { + g_critical("cannot open directory %s, %s\n", directory, error->message); + errors++; + return; + } + + const gchar* filename= NULL; + + while((filename= g_dir_read_name(dir))) { + if (!source_db || g_str_has_prefix(filename, source_db)){ + if (g_strrstr(filename, "-schema-post.sql")) { + split_file= g_strsplit(filename, "-schema-post.sql", 0); + database= split_file[0]; + //table= split_file[0]; //NULL + g_message("Restoring routines and events for `%s`", db ? db : database); + restore_data(conn, database, NULL, filename, TRUE, TRUE); + } + } + } + + g_strfreev(split_file); + g_dir_close(dir); +} + +void create_database(MYSQL *conn, gchar *database){ + + gchar* query = NULL; + + if((db == NULL && source_db == NULL) || (db != NULL && source_db != NULL && !g_ascii_strcasecmp(db, source_db))){ + const gchar* filename= g_strdup_printf("%s-schema-create.sql", db ? db : database); + const gchar* filenamegz= g_strdup_printf("%s-schema-create.sql.gz", db ? db : database); + + if (g_file_test (filename, G_FILE_TEST_EXISTS)){ + restore_data(conn, database, NULL, filename, TRUE, FALSE); + }else if (g_file_test (filenamegz, G_FILE_TEST_EXISTS)){ + restore_data(conn, database, NULL, filenamegz, TRUE, FALSE); + }else{ + query= g_strdup_printf("CREATE DATABASE `%s`", db ? 
db : database); + mysql_query(conn, query); + } + }else{ + query= g_strdup_printf("CREATE DATABASE `%s`", db ? db : database); + mysql_query(conn, query); + } + + g_free(query); + return; +} + +void add_schema(const gchar* filename, MYSQL *conn) { + // 0 is database, 1 is table with -schema on the end + gchar** split_file= g_strsplit(filename, ".", 0); + gchar* database= split_file[0]; + // Remove the -schema from the table name + gchar** split_table= g_strsplit(split_file[1], "-schema", 0); + gchar* table= split_table[0]; + + gchar* query= g_strdup_printf("SHOW CREATE DATABASE `%s`", db ? db : database); + if (mysql_query(conn, query)) { + g_message("Creating database `%s`", db ? db : database); + create_database(conn, database); + } else { + MYSQL_RES *result= mysql_store_result(conn); + // In drizzle the query succeeds with no rows + my_ulonglong row_count= mysql_num_rows(result); + mysql_free_result(result); + if (row_count == 0) { + create_database(conn, database); + } + } + + if (overwrite_tables) { + g_message("Dropping table or view (if exists) `%s`.`%s`", db ? db : database, table); + query= g_strdup_printf("DROP TABLE IF EXISTS `%s`.`%s`", db ? db : database, table); + mysql_query(conn, query); + query= g_strdup_printf("DROP VIEW IF EXISTS `%s`.`%s`", db ? db : database, table); + mysql_query(conn, query); + } + + g_free(query); + + g_message("Creating table `%s`.`%s`", db ? 
db : database, table); + restore_data(conn, database, table, filename, TRUE, TRUE); + g_strfreev(split_table); + g_strfreev(split_file); + return; +} + +void add_table(const gchar* filename, struct configuration *conf) { + struct job *j= g_new0(struct job, 1); + struct restore_job *rj= g_new(struct restore_job, 1); + j->job_data= (void*) rj; + rj->filename= g_strdup(filename); + j->type= JOB_RESTORE; + gchar** split_file= g_strsplit(filename, ".", 0); + rj->database= g_strdup(split_file[0]); + rj->table= g_strdup(split_file[1]); + rj->part= g_ascii_strtoull(split_file[2], NULL, 10); + g_async_queue_push(conf->queue, j); + return; +} + +void *process_queue(struct thread_data *td) { + struct configuration *conf= td->conf; + g_mutex_lock(init_mutex); + MYSQL *thrconn= mysql_init(NULL); + g_mutex_unlock(init_mutex); + + mysql_options(thrconn, MYSQL_READ_DEFAULT_GROUP, "myloader"); + + if (compress_protocol) + mysql_options(thrconn, MYSQL_OPT_COMPRESS, NULL); + + if (!mysql_real_connect(thrconn, hostname, username, password, NULL, port, socket_path, 0)) { + g_critical("Failed to connect to MySQL server: %s", mysql_error(thrconn)); + exit(EXIT_FAILURE); + } + + if (mysql_query(thrconn, "SET SESSION wait_timeout = 2147483")){ + g_warning("Failed to increase wait_timeout: %s", mysql_error(thrconn)); + } + + if (!enable_binlog) + mysql_query(thrconn, "SET SQL_LOG_BIN=0"); + + mysql_query(thrconn, "/*!40101 SET NAMES binary*/"); + mysql_query(thrconn, "/*!40101 SET SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */"); + mysql_query(thrconn, "/*!40014 SET UNIQUE_CHECKS=0 */"); + mysql_query(thrconn, "SET autocommit=0"); + + g_async_queue_push(conf->ready, GINT_TO_POINTER(1)); + + struct job* job= NULL; + struct restore_job* rj= NULL; + for(;;) { + job= (struct job*)g_async_queue_pop(conf->queue); + + switch (job->type) { + case JOB_RESTORE: + rj= (struct restore_job *)job->job_data; + g_message("Thread %d restoring `%s`.`%s` part %d", td->thread_id, rj->database, rj->table, rj->part); + 
restore_data(thrconn, rj->database, rj->table, rj->filename, FALSE, TRUE); + if (rj->database) g_free(rj->database); + if (rj->table) g_free(rj->table); + if (rj->filename) g_free(rj->filename); + g_free(rj); + g_free(job); + break; + case JOB_SHUTDOWN: + g_message("Thread %d shutting down", td->thread_id); + if (thrconn) + mysql_close(thrconn); + g_free(job); + mysql_thread_end(); + return NULL; + break; + default: + g_critical("Something very bad happened!"); + exit(EXIT_FAILURE); + } + } + if (thrconn) + mysql_close(thrconn); + mysql_thread_end(); + return NULL; +} + +void restore_data(MYSQL *conn, char *database, char *table, const char *filename, gboolean is_schema, gboolean need_use) { + void *infile; + gboolean is_compressed= FALSE; + gboolean eof= FALSE; + guint query_counter= 0; + GString *data= g_string_sized_new(512); + + gchar* path= g_build_filename(directory, filename, NULL); + + if (!g_str_has_suffix(path, ".gz")) { + infile= g_fopen(path, "r"); + is_compressed= FALSE; + } else { + infile= (void*) gzopen(path, "r"); + is_compressed= TRUE; + } + + if (!infile) { + g_critical("cannot open file %s (%d)", filename, errno); + errors++; + return; + } + + + if(need_use){ + gchar *query= g_strdup_printf("USE `%s`", db ? db : database); + + if (mysql_query(conn, query)) { + g_critical("Error switching to database %s whilst restoring table %s", db ? db : database, table); + g_free(query); + errors++; + return; + } + + g_free(query); + } + + + if (!is_schema) + mysql_query(conn, "START TRANSACTION"); + + while (eof == FALSE) { + if (read_data(infile, is_compressed, data, &eof)) { + // Search for ; in last 5 chars of line + if (g_strrstr(&data->str[data->len >= 5 ? data->len - 5 : 0], ";\n")) { + if (mysql_real_query(conn, data->str, data->len)) { + g_critical("Error restoring %s.%s from file %s: %s", db ? 
db : database, table, filename, mysql_error(conn)); + errors++; + return; + } + query_counter++; + if (!is_schema &&(query_counter == commit_count)) { + query_counter= 0; + if (mysql_query(conn, "COMMIT")) { + g_critical("Error committing data for %s.%s: %s", db ? db : database, table, mysql_error(conn)); + errors++; + return; + } + mysql_query(conn, "START TRANSACTION"); + } + + g_string_set_size(data, 0); + } + } else { + g_critical("error reading file %s (%d)", filename, errno); + errors++; + return; + } + } + if (!is_schema && mysql_query(conn, "COMMIT")) { + g_critical("Error committing data for %s.%s from file %s: %s", db ? db : database, table, filename, mysql_error(conn)); + errors++; + } + g_string_free(data, TRUE); + g_free(path); + if (!is_compressed) { + fclose(infile); + } else { + gzclose((gzFile)infile); + } + return; +} + +gboolean read_data(FILE *file, gboolean is_compressed, GString *data, gboolean *eof) { + char buffer[256]; + + do { + if (!is_compressed) { + if (fgets(buffer, 256, file) == NULL) { + if (feof(file)) { + *eof= TRUE; + buffer[0]= '\0'; + } else { + return FALSE; + } + } + } else { + if (!gzgets((gzFile)file, buffer, 256)) { + if (gzeof((gzFile)file)) { + *eof= TRUE; + buffer[0]= '\0'; + } else { + return FALSE; + } + } + } + g_string_append(data, buffer); + } while ((buffer[strlen(buffer)] != '\0') && *eof == FALSE); + + return TRUE; +} diff --git a/myloader.h b/myloader.h new file mode 100644 index 0000000..b5a110b --- /dev/null +++ b/myloader.h @@ -0,0 +1,51 @@ +/* + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. + + Authors: Domas Mituzas, Facebook ( domas at fb dot com ) + Mark Leith, Oracle Corporation (mark dot leith at oracle dot com) + Andrew Hutchings, SkySQL (andrew at skysql dot com) + +*/ + +#ifndef _myloader_h +#define _myloader_h + +enum job_type { JOB_SHUTDOWN, JOB_RESTORE }; + +struct configuration { + GAsyncQueue* queue; + GAsyncQueue* ready; + GMutex* mutex; + int done; +}; + +struct thread_data { + struct configuration *conf; + guint thread_id; +}; + +struct job { + enum job_type type; + void *job_data; + struct configuration *conf; +}; + +struct restore_job { + char *database; + char *table; + char *filename; + guint part; +}; + +#endif diff --git a/server_detect.c b/server_detect.c new file mode 100644 index 0000000..63a0332 --- /dev/null +++ b/server_detect.c @@ -0,0 +1,71 @@ +/* + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ + Authors: Andrew Hutchings, SkySQL (andrew at skysql dot com) +*/ + +#include <pcre.h> +#include <string.h> +#include <glib.h> +#include "server_detect.h" + +int detect_server(MYSQL *conn) { + pcre *re= NULL; + const char *error; + int erroroffset; + int ovector[9]= {0}; + int rc; + const char* db_version= mysql_get_server_info(conn); + + re= pcre_compile(DETECT_MYSQL_REGEX, 0, &error, &erroroffset, NULL); + if (!re) { + g_critical("Regular expression fail: %s", error); + exit(EXIT_FAILURE); + } + + rc = pcre_exec(re, NULL, db_version, strlen(db_version), 0, 0, ovector, 9); + pcre_free(re); + + if (rc > 0) { + return SERVER_TYPE_MYSQL; + } + + re= pcre_compile(DETECT_DRIZZLE_REGEX, 0, &error, &erroroffset, NULL); + if (!re) { + g_critical("Regular expression fail: %s", error); + exit(EXIT_FAILURE); + } + + rc = pcre_exec(re, NULL, db_version, strlen(db_version), 0, 0, ovector, 9); + pcre_free(re); + + if (rc > 0) { + return SERVER_TYPE_DRIZZLE; + } + + re= pcre_compile(DETECT_MARIADB_REGEX, 0, &error, &erroroffset, NULL); + if (!re) { + g_critical("Regular expression fail: %s", error); + exit(EXIT_FAILURE); + } + + rc = pcre_exec(re, NULL, db_version, strlen(db_version), 0, 0, ovector, 9); + pcre_free(re); + + if (rc > 0) { + return SERVER_TYPE_MYSQL; + } + + return SERVER_TYPE_UNKNOWN; +} diff --git a/server_detect.h b/server_detect.h new file mode 100644 index 0000000..46b5da2 --- /dev/null +++ b/server_detect.h @@ -0,0 +1,28 @@ +/* + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. + + Authors: Andrew Hutchings, SkySQL (andrew at skysql dot com) +*/ +#ifndef _server_detect_h +#define _server_detect_h + +#include <mysql.h> + +#define DETECT_MYSQL_REGEX "^([3-9]\\.[0-9]+\\.[0-9]+)" +#define DETECT_DRIZZLE_REGEX "^(20[0-9]{2}\\.(0[1-9]|1[012])\\.[0-9]+)" +#define DETECT_MARIADB_REGEX "^([0-9]{1,2}\\.[0-9]+\\.[0-9]+)" + +enum server_type { SERVER_TYPE_UNKNOWN, SERVER_TYPE_MYSQL, SERVER_TYPE_DRIZZLE }; +int detect_server(MYSQL *conn); +#endif