author    serg@infomag.ape.relarn.ru <>  2000-08-15 21:52:42 +0400
committer serg@infomag.ape.relarn.ru <>  2000-08-15 21:52:42 +0400
commit    bf617db81805fbc3c1357b3a4eaeb90cd637272a (patch)
tree      135f6b5b6961c6b7f5bd0d7351d07ae18d9a776f
parent    563fdb8e2a1df787626cc92d243e392763696aba (diff)
parent    883042ba1e3d6b875da68daac9c8767c1f0b1ab2 (diff)
download  mariadb-git-bf617db81805fbc3c1357b3a4eaeb90cd637272a.tar.gz
Merge
-rw-r--r--  BitKeeper/etc/logging_ok | 20
-rw-r--r--  Docs/Makefile.am | 2
-rw-r--r--  Docs/internals.texi | 22
-rw-r--r--  Docs/manual.texi | 686
-rw-r--r--  heap/hp_rnext.c | 2
-rw-r--r--  include/config-win.h | 10
-rw-r--r--  include/global.h | 4
-rw-r--r--  include/my_base.h | 7
-rw-r--r--  include/myisam.h | 18
-rw-r--r--  isam/_dynrec.c | 14
-rw-r--r--  isam/_search.c | 4
-rw-r--r--  merge/rrnd.c | 2
-rw-r--r--  myisam/ft_eval.c | 6
-rw-r--r--  myisam/ft_parser.c | 2
-rw-r--r--  myisam/ft_static.c | 20
-rw-r--r--  myisam/mi_check.c | 95
-rw-r--r--  myisam/mi_locking.c | 4
-rw-r--r--  myisam/myisamdef.h | 3
-rw-r--r--  myisam/myisampack.c | 2
-rw-r--r--  myisammrg/myrg_rrnd.c | 6
-rw-r--r--  mysql.proj | bin 118784 -> 122880 bytes
-rw-r--r--  mysys/charset.c | 6
-rw-r--r--  mysys/mf_casecnv.c | 6
-rw-r--r--  mysys/mf_pack.c | 12
-rw-r--r--  mysys/mf_path.c | 2
-rw-r--r--  mysys/mf_wfile.c | 3
-rw-r--r--  mysys/my_alloc.c | 2
-rw-r--r--  mysys/my_lib.c | 4
-rw-r--r--  mysys/my_lread.c | 2
-rw-r--r--  mysys/my_lwrite.c | 2
-rw-r--r--  mysys/my_winthread.c | 6
-rw-r--r--  mysys/string.c | 4
-rw-r--r--  mysys/thr_alarm.c | 2
-rw-r--r--  mysys/thr_rwlock.c | 8
-rw-r--r--  regex/regcomp.c | 15
-rw-r--r--  regex/regex.h | 4
-rw-r--r--  regex/regex2.h | 4
-rwxr-xr-x  scripts/mysqlhotcopy.sh | 2
-rwxr-xr-x  sql-bench/bench-init.pl.sh | 5
-rwxr-xr-x  sql-bench/server-cfg.sh | 18
-rw-r--r--  sql/ChangeLog | 6
-rw-r--r--  sql/field.cc | 3
-rw-r--r--  sql/filesort.cc | 4
-rw-r--r--  sql/gen_lex_hash.cc | 19
-rw-r--r--  sql/ha_berkeley.cc | 40
-rw-r--r--  sql/ha_berkeley.h | 2
-rw-r--r--  sql/ha_myisam.cc | 69
-rw-r--r--  sql/ha_myisam.h | 3
-rw-r--r--  sql/handler.h | 2
-rw-r--r--  sql/item_func.cc | 16
-rw-r--r--  sql/item_timefunc.cc | 2
-rw-r--r--  sql/mini_client.cc | 4
-rw-r--r--  sql/mysql_priv.h | 1
-rw-r--r--  sql/mysqld.cc | 11
-rw-r--r--  sql/sql_load.cc | 5
-rw-r--r--  sql/sql_parse.cc | 57
-rw-r--r--  sql/sql_select.cc | 81
-rw-r--r--  sql/sql_table.cc | 10
-rw-r--r--  sql/sql_yacc.yy | 33
-rw-r--r--  sql/structs.h | 3
-rw-r--r--  strings/ctype-sjis.c | 4
-rw-r--r--  strings/ctype-tis620.c | 10
-rw-r--r--  strings/t_ctype.h | 4
-rw-r--r--  support-files/mysql.spec.sh | 2
-rw-r--r--  vio/viotest-ssl.cc | 2
-rw-r--r--  vio/viotest-sslconnect.cc | 2
-rw-r--r--  vio/viotest.cc | 2
67 files changed, 799 insertions, 634 deletions
diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok
index 505102a839c..ed442907849 100644
--- a/BitKeeper/etc/logging_ok
+++ b/BitKeeper/etc/logging_ok
@@ -1,15 +1,15 @@
-tim@localhost.polyesthetic.msg
-tim@work.mysql.com
-monty@work.mysql.com
-tonu@work.mysql.com
-sinisa@work.mysql.com
-paul@work.mysql.com
-jamppa@work.mysql.com
+bk@work.mysql.com
davida@work.mysql.com
+jamppa@work.mysql.com
+jcole@jcole.burghcom.com
matt@work.mysql.com
-serg@work.mysql.com
-bk@work.mysql.com
+monty@work.mysql.com
+paul@work.mysql.com
sasha@mysql.sashanet.com
sasha@work.mysql.com
-jcole@jcole.burghcom.com
serg@infomag.ape.relarn.ru
+serg@work.mysql.com
+sinisa@work.mysql.com
+tim@localhost.polyesthetic.msg
+tim@work.mysql.com
+tonu@work.mysql.com
diff --git a/Docs/Makefile.am b/Docs/Makefile.am
index fbe0ca41619..1b5d77dea77 100644
--- a/Docs/Makefile.am
+++ b/Docs/Makefile.am
@@ -1,5 +1,3 @@
-# Monty
-
# Normally you do not need to remake the files here. But if you want
# to you will need the GNU TeX-info utilities. To make a Postscript
# files you also need TeX and dvips. To make the PDF file you will
diff --git a/Docs/internals.texi b/Docs/internals.texi
index 01f5b0a346c..06a9c1d9265 100644
--- a/Docs/internals.texi
+++ b/Docs/internals.texi
@@ -121,6 +121,28 @@ After this it will give other threads a possibility to open the
same tables.
@end itemize
+@node Filesort
+@chapter How does MySQL do sorting (filesort)
+
+- Read all rows according to key or by table-scanning.
+- Store the sort-key in a buffer (sort_buffer).
+- When the buffer gets full, run a qsort on it and store the result
+ in a temporary file. Save a pointer to the sorted block.
+- Repeat the above until all rows have been read.
+
+- Repeat the following until there are fewer than MERGEBUFF2 (15) blocks left.
+  - Do a multi-merge of up to MERGEBUFF (7) regions to one block in
+    another temporary file. Repeat until all blocks from the first file
+    are in the second file.
+- On the last multi-merge, only the pointer to the row (last part of
+ the sort-key) is written to a result file.
+
+- Now the code in sql/records.cc will be used to read through the rows
+  in sorted order by using the row pointers in the result file.
+  To optimize this, we read in a big block of row pointers, sort these
+  and then we read the rows in the sorted order into a row buffer
+  (record_buffer).
+
@node Index
@unnumbered Index
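The filesort steps added to Docs/internals.texi above can be illustrated with a small stand-alone sketch. This is not the actual sql/filesort.cc code: the key format (plain long integers), the tiny buffer size, and the single merge pass are simplifying assumptions made for the example.

/* Illustrative sketch of the filesort idea described above: sort keys in a
   fixed-size buffer, flush each sorted run to a temporary file, then do a
   multi-way merge of the runs.  Key format, buffer size and the merge
   fan-out are simplified assumptions, not the real implementation. */
#include <stdio.h>
#include <stdlib.h>

#define SORT_BUFFER_KEYS 4   /* tiny so the example produces several runs */
#define MERGEBUFF        7   /* merge at most this many runs at a time */

static int cmp_long(const void *a, const void *b)
{
  long x = *(const long *) a, y = *(const long *) b;
  return (x > y) - (x < y);
}

/* Phase 1: qsort the full buffer and write it as one sorted run */
static FILE *write_run(long *buf, size_t n)
{
  FILE *run = tmpfile();
  qsort(buf, n, sizeof(long), cmp_long);
  fwrite(buf, sizeof(long), n, run);
  rewind(run);
  return run;
}

/* Phase 2: simple multi-way merge of up to MERGEBUFF runs into 'out' */
static void merge_runs(FILE **runs, int nruns, FILE *out)
{
  long head[MERGEBUFF];
  int  alive[MERGEBUFF], i;
  for (i = 0; i < nruns; i++)
    alive[i] = (fread(&head[i], sizeof(long), 1, runs[i]) == 1);
  for (;;)
  {
    int best = -1;
    for (i = 0; i < nruns; i++)
      if (alive[i] && (best < 0 || head[i] < head[best]))
        best = i;
    if (best < 0)
      break;                                    /* all runs exhausted */
    fwrite(&head[best], sizeof(long), 1, out);
    alive[best] = (fread(&head[best], sizeof(long), 1, runs[best]) == 1);
  }
}

int main(void)
{
  long   keys[] = {42, 7, 99, 3, 15, 8, 23, 1, 64, 50}, buf[SORT_BUFFER_KEYS];
  FILE  *runs[MERGEBUFF], *out = tmpfile();
  int    nruns = 0;
  size_t i, n = 0;

  for (i = 0; i < sizeof(keys) / sizeof(keys[0]); i++)
  {
    buf[n++] = keys[i];
    if (n == SORT_BUFFER_KEYS)                  /* buffer full: flush a run */
    {
      runs[nruns++] = write_run(buf, n);
      n = 0;
    }
  }
  if (n)                                        /* flush the last partial run */
    runs[nruns++] = write_run(buf, n);

  merge_runs(runs, nruns, out);                 /* one merge pass is enough here */

  rewind(out);
  {
    long k;
    while (fread(&k, sizeof(long), 1, out) == 1)
      printf("%ld ", k);
    putchar('\n');
  }
  return 0;
}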
diff --git a/Docs/manual.texi b/Docs/manual.texi
index c26e6d3b9fb..c944516b37e 100644
--- a/Docs/manual.texi
+++ b/Docs/manual.texi
@@ -1469,8 +1469,8 @@ connect to your @strong{MySQL} server. @xref{ODBC}.
Very fast B-tree disk tables with index compression.
@item
-Up to 16 indexes per table are allowed. Each index may consist of 1 to 16
-columns or parts of columns. The maximum index length is 256 bytes (this
+Up to 32 indexes per table are allowed. Each index may consist of 1 to 16
+columns or parts of columns. The maximum index length is 500 bytes (this
may be changed when compiling @strong{MySQL}). An index may use a prefix
of a @code{CHAR} or @code{VARCHAR} field.
@@ -1599,7 +1599,7 @@ that is unlikely to destroy any data beyond rescue, because all data are
flushed to disk between each query. There hasn't been a single bug
report about lost data because of bugs in @strong{MySQL}.
-@item The MyISAM table handler --- Beta
+@item The MyISAM table handler --- Gamma
This is new in @strong{MySQL} 3.23. It's largely based on the ISAM table code
but has a lot of new very useful features.
@@ -1621,7 +1621,7 @@ handling. Not a single reported bug in this system.
@item Query optimizer --- Stable
-@item Range optimizer --- Gamma
+@item Range optimizer --- Stable
@item Join optimizer --- Stable
@@ -1632,7 +1632,7 @@ using standard OS locking (@code{fcntl()}). In these cases, you should run the
to occur on some Linux systems, and on SunOS when using NFS-mounted file
systems.
-@item Linux threads --- Gamma
+@item Linux threads --- Stable
The major problem found has been with the @code{fcntl()} call, which is
fixed by using the @w{@code{--skip-locking}} option to
@code{mysqld}. Some people have reported lockup problems with the 0.5
@@ -1646,7 +1646,7 @@ unstable, and we have been able to reproduce a coredump after creating
@item Solaris 2.5+ pthreads --- Stable
We use this for all our production work.
-@item MIT-pthreads (Other systems) --- Gamma
+@item MIT-pthreads (Other systems) --- Stable
There have been no reported bugs since 3.20.15 and no known bugs since
3.20.16. On some systems, there is a ``misfeature'' where some operations are
quite slow (a 1/20 second sleep is done between each query). Of course,
@@ -1654,7 +1654,7 @@ MIT-pthreads may slow down everything a bit, but index-based @code{SELECT}
statements are usually done in one time frame so there shouldn't be a mutex
locking/thread juggling.
-@item Other thread implementions --- Alpha - Beta
+@item Other thread implementations --- Beta - Gamma
The ports to other systems are still very new and may have bugs, possibly
in @strong{MySQL}, but most often in the thread implementation itself.
@@ -1674,11 +1674,22 @@ Now maintained by Jochen Wiedmann
Written and maintained by Yves Carlier
@email{Yves.Carlier@@rug.ac.be}. Thanks!
-@item @code{GRANT} --- Gamma
+@item @code{GRANT} --- Stable
Big changes made in @strong{MySQL} 3.22.12.
@item @strong{MyODBC} (uses ODBC SDK 2.5) --- Gamma
It seems to work well with some programs.
+
+@item replication -- alpha
+We are still working on replication, so don't expect this to be rock
+solid yet. On the other hand, some @strong{MySQL} users are already
+using this with good results...
+
+@item BDB tables -- alpha
+The Berkeley DB code is very stable, but we are still improving the interface
+between @strong{MySQL} and BDB tables, so it will take some time before this
+is as tested as the other table types.
+
@end table
MySQL AB provides email support for paying customers, but the @strong{MySQL}
@@ -2155,7 +2166,7 @@ which mentions @strong{MySQL} in the right company}
@item
@uref{http://www.lightlink.com/hessling/rexxsql.html, A REXX interface to SQL databases}
@item
-@uref{http://www.mytcl.cx/, Tcl interface, based on tcl-sql, many bugfixes.}
+@uref{http://www.mytcl.cx/, Tcl interface based on tcl-sql with many bugfixes.}
@item
@uref{http://www.binevolve.com/~tdarugar/tcl-sql, Tcl interface}
@end itemize
@@ -2901,7 +2912,7 @@ must license the @strong{MySQL} server. Generally these examples
involve providing @strong{MySQL} as a integrated part of a product.
Note that a single @strong{MySQL} license covers any number of CPUs and
-@code{mysqld} servers on a machine! It also has no limit on the number
+@code{mysqld} servers on a machine! There is no artificial limit on the number
of clients that connect to the server in any way.
@node Products that use MySQL, ISP, Licensing examples, Licensing examples
@@ -3959,9 +3970,7 @@ DEC UNIX 4.x with native threads. @xref{Alpha-DEC-Unix}.
@item
FreeBSD 2.x with the included MIT-pthreads package. @xref{FreeBSD}.
@item
-FreeBSD 3.x with native threads. @xref{BSDI}.
-@item
-FreeBSD 4.x with native threads.
+FreeBSD 3.x and 4.x with native threads. @xref{FreeBSD}.
@item
HP-UX 10.20 with the included MIT-pthreads package. @xref{HP-UX 10.20}.
@item
@@ -4654,7 +4663,7 @@ work. @code{libg++} is not needed when using @code{gcc}. @code{gcc}
C++ files, such as @file{sql/sql_base.cc}. If you only have @code{gcc} 2.7.x,
you must upgrade your @code{gcc} to be able to compile @strong{MySQL}.
-@code{gcc} >= 2.95.2 should be used when compiling @strong{MySQL} 3.23.x.
+@code{gcc} >= 2.95.2 is recommended when compiling @strong{MySQL} 3.23.x.
@item
A good @code{make} program. GNU @code{make} is always recommended and is
@@ -5773,6 +5782,23 @@ run @code{make} again.
Configure with @code{CFLAGS=-DHAVE_CURSES CXXFLAGS=-DHAVE_CURSES ./configure}
@end itemize
+If your linker can't find @code{-lz} when linking your client program,
+the problem is probably that your @file{libz.so} file is installed in
+@file{/usr/local/lib}. You can fix this with one of the following
+methods:
+
+@itemize @bullet
+@item
+Add @file{/usr/local/lib} to @code{LD_LIBRARY_PATH}
+@item
+Add a link to @code{libz.so} from @code{/lib}
+@item
+If you are using Solaris 8, you can install the optional zlib from your
+Solaris 8 CD distribution.
+@item
+Configure @strong{MySQL} with the @code{--with-named-z-libs=no} option.
+@end itemize
+
@node Solaris x86, SunOS, Solaris 2.7, Source install system issues
@subsection Solaris x86 notes
@@ -6213,7 +6239,10 @@ make_install
and @strong{mysqld} should be ready to run.
@node Alpha-DEC-Unix, Alpha-DEC-OSF1, Linux, Source install system issues
-@subsection Alpha-DEC-Unix notes
+@subsection Alpha-DEC-Unix notes (Tru64)
+
+If you are using egcs 1.1.2 on Digital UNIX, you should upgrade to gcc
+2.95.2, as egcs has some bad bugs on DEC!
When compiling threaded programs under Digital UNIX, the documentation
recommends using the @code{-pthread} option for @code{cc} and @code{cxx} and
@@ -6248,6 +6277,45 @@ shell> nohup mysqld [options] &
signal sent from the terminal. Alternatively, start the server by running
@code{safe_mysqld}, which invokes @code{mysqld} using @code{nohup} for you.
+If you have a problem compiling @file{mysys/get_opt.c}, just remove the
+@code{#define _NO_PROTO} line from the start of that file!
+
+If you are using Compaq's CC compiler, the following configure line should
+work:
+
+@example
+CC="cc -pthread"
+CFLAGS="-O4 -ansi_alias -ansi_args -fast -inline speed -speculate all -arch host"
+CXX="cxx -pthread"
+CXXFLAGS="-O4 -ansi_alias -ansi_args -fast -inline speed -speculate all -arch host"
+export CC CFLAGS CXX CXXFLAGS
+./configure \
+--prefix=/usr/local/mysql \
+--with-low-memory \
+--enable-large-files \
+--enable-shared=yes \
+--with-named-thread-libs="-lpthread -lmach -lexc -lc"
+gnumake
+@end example
+
+If you have a problem with libtool when compiling with shared libraries
+as above while linking @code{mysql}, you should be able to get around
+this by issuing:
+
+@example
+cd mysql
+/bin/sh ../libtool --mode=link cxx -pthread -O3 -DDBUG_OFF \
+-O4 -ansi_alias -ansi_args -fast -inline speed \
+-speculate all -arch host -DUNDEF_HAVE_GETHOSTBYNAME_R \
+-o mysql mysql.o readline.o sql_string.o completion_hash.o \
+../readline/libreadline.a -lcurses \
+../libmysql/.libs/libmysqlclient.so -lm
+cd ..
+gnumake
+gnumake install
+scripts/mysql_install_db
+@end example
+
@node Alpha-DEC-OSF1, SGI-Irix, Alpha-DEC-Unix, Source install system issues
@subsection Alpha-DEC-OSF1 notes
@@ -6334,10 +6402,11 @@ Please submit a full bug report.
@end example
To fix this you should change to the @code{sql} directory and do a 'cut
-and paste' of the last @code{gcc} line, but change @code{-O3} to @code{-O0} (or add
-@code{-O0} immediately after @code{gcc} if you don't have any @code{-O}
-option on your compile line. After this is done you can just change back to
-the top level directly and run @code{make} again.
+and paste' of the last @code{gcc} line, but change @code{-O3} to
+@code{-O0} (or add @code{-O0} immediately after @code{gcc} if you don't
+have any @code{-O} option on your compile line). After this is done you
+can just change back to the top-level directory and run @code{make}
+again.
@node SGI-Irix, FreeBSD, Alpha-DEC-OSF1, Source install system issues
@subsection SGI-Irix notes
@@ -6757,7 +6826,6 @@ handling in @code{gcc}/@code{egcs} is not thread-safe! (This is tested with
@example
shell> CC=gcc \
- CFLAGS="" \
CXX=gcc \
CXXFLAGS="-felide-constructors -fno-exceptions -fno-rtti" \" \
./configure --prefix=/usr/local/mysql --with-debug --with-low-memory
@@ -9650,6 +9718,12 @@ When running @strong{MySQL}, follow these guidelines whenever possible:
@itemize @bullet
@item
+DON'T EVER GIVE ANYBODY (BUT THE @strong{MySQL} ROOT USER) ACCESS TO THE
+@code{user} TABLE IN THE @code{mysql} DATABASE! The encrypted password
+is the real password in @strong{MySQL}; if you know it for one user, you
+can easily log in as that user if you have access to their 'host'.
+
+@item
Learn the @strong{MySQL} access privilege system. The @code{GRANT} and
@code{REVOKE} commands are used for restricting access to @strong{MySQL}. Do
not grant any more privileges than necessary. Never grant privileges to all
@@ -9961,10 +10035,12 @@ necessary connection between the password you use to log in to a Unix machine
and the password you use to access a database on that machine.
@item
-@strong{MySQL} encrypts passwords using a different algorithm than the one
-used during the Unix login process. See the descriptions of the
+@strong{MySQL} encrypts passwords using a different algorithm than the
+one used during the Unix login process. See the descriptions of the
@code{PASSWORD()} and @code{ENCRYPT()} functions in @ref{Miscellaneous
-functions}.
+functions}. Note that even though the password is stored 'scrambled',
+it's enough to know your 'scrambled' password to be able to connect to
+the @strong{MySQL} server!
@end itemize
@node Connecting, Password security, User names, Privilege system
@@ -10058,6 +10134,13 @@ the risks of each method:
@itemize @bullet
@item
+Never give a normal user access to the @code{mysql.user} table. Knowing
+the encrypted password for a user makes it possible to log in as that
+user. The passwords are only scrambled so that one shouldn't be able to
+see the real password you used (if you happen to use a similar password
+with your other applications).
+
+@item
Use a @code{-pyour_pass} or @code{--password=your_pass} option on the command
line. This is convenient but insecure, because your password becomes visible
to system status programs (such as @code{ps}) that may be invoked by other
@@ -10241,10 +10324,13 @@ The @strong{process} privilege can be used to view the plain text of
currently executing queries, including queries that set or change passwords.
@item
-Privileges on the @code{mysql} database can be used to change passwords and
-other access privilege information. (Passwords are stored encrypted, so a
-malicious user cannot simply read them. However, with sufficient privileges,
-that same user can replace a password with a different one.)
+Privileges on the @code{mysql} database can be used to change passwords
+and other access privilege information. (Passwords are stored
+encrypted, so a malicious user cannot simply read them to learn the
+plain-text password. However, anyone who can read the @code{mysql.user}
+password column can use it to log in to the @strong{MySQL} server as the
+given user, and with sufficient privileges that same user can replace a
+password with a different one.)
@end itemize
There are some things that you cannot do with the @strong{MySQL}
@@ -10538,11 +10624,15 @@ matches, it means the user must connect without specifying a password.
@findex PASSWORD()
Non-blank @code{Password} values represent encrypted passwords.
-@strong{MySQL} does not store passwords in plaintext form for anyone to see.
-Rather, the password supplied by a user who is attempting to connect is
-encrypted (using the @code{PASSWORD()} function) and compared to the
-already-encrypted version stored in the @code{user} table. If they match,
-the password is correct.
+@strong{MySQL} does not store passwords in plaintext form for anyone to
+see. Rather, the password supplied by a user who is attempting to
+connect is encrypted (using the @code{PASSWORD()} function). The
+encrypted password is then used when the client and server check whether
+the password is correct (this is done without the encrypted password
+ever traveling over the connection). Note that from @strong{MySQL}'s
+point of view the encrypted password is the REAL password, so you should
+not give anyone access to it! In particular, don't give normal users
+read access to the tables in the @code{mysql} database!
The examples below show how various combinations of @code{Host} and
@code{User} values in @code{user} table entries apply to incoming
@@ -11151,6 +11241,9 @@ Website}.
@cindex Passwords, setting
@findex PASSWORD()
+In most cases you should use @code{GRANT} to set up your users/passwords,
+so the following only applies to advanced users. @xref{GRANT, , @code{GRANT}}.
+
The examples in the preceding sections illustrate an important principle:
when you store a non-empty password using @code{INSERT} or @code{UPDATE}
statements, you must use the @code{PASSWORD()} function to encrypt it. This
@@ -11226,10 +11319,10 @@ action you can take to correct the problem:
@itemize @bullet
@item
-After installing @strong{MySQL}, did you run the @code{mysql_install_db} script
-to set up the initial grant table contents? If not, do
-so. @xref{Default privileges}. Test the initial privileges by
-executing this command:
+After installing @strong{MySQL}, did you run the @code{mysql_install_db}
+script to set up the initial grant table contents? If not, do so.
+@xref{Default privileges}. Test the initial privileges by executing
+this command:
@example
shell> mysql -u root test
@@ -11275,42 +11368,12 @@ the grant tables changed with @strong{MySQL} 3.22.11 when the @code{GRANT}
statement became functional.
@item
-If you make changes to the grant tables directly (using @code{INSERT} or
-@code{UPDATE} statement) and your changes seem to be ignored, remember that
-you must issue a @code{FLUSH PRIVILEGES} statement or execute a
-@code{mysqladmin flush-privileges} command to cause the server to reread the
-tables. Otherwise your changes have no effect until the next time the server
-is restarted. Remember that after you set the @code{root} password, you
-won't need to specify it until after you flush the privileges, because the
-server still won't know you've changed the password yet!
-
-@item
If your privileges seem to have changed in the middle of a session, it may be
that a superuser has changed them. Reloading the grant tables affects new
client connections, but it also affects existing connections as indicated in
@ref{Privilege changes}.
@item
-For testing, start the @code{mysqld} daemon with the
-@code{--skip-grant-tables} option. Then you can change the @strong{MySQL}
-grant tables and use the @code{mysqlaccess} script to check whether or not
-your modifications have the desired effect. When you are satisfied with your
-changes, execute @code{mysqladmin flush-privileges} to tell the @code{mysqld}
-server to start using the new grant tables. @strong{Note:} Reloading the
-grant tables overrides the @code{--skip-grant-tables} option. This allows
-you to tell the server to begin using the grant tables again without bringing
-it down and restarting it.
-
-@item
-If you have access problems with a Perl, PHP, Python or ODBC program, try to
-connect to the server with @code{mysql -u user_name db_name} or @code{mysql
--u user_name -pyour_pass db_name}. If you are able to connect using the
-@code{mysql} client, there is a problem with your program and not with the
-access privileges. (Notice that there is no space between @code{-p} and the
-password; you can also use the @code{--password=your_pass} syntax to specify
-the password.)
-
-@item
If you can't get your password to work, remember that you must use
the @code{PASSWORD()} function if you set the password with the
@code{INSERT}, @code{UPDATE} or @code{SET PASSWORD} statements. The
@@ -11349,7 +11412,9 @@ The @code{Access denied} error message will tell you who you are trying
to log in as, the host from which you are trying to connect, and whether
or not you were using a password. Normally, you should have one entry in
the @code{user} table that exactly matches the hostname and user name
-that were given in the error message.
+that were given in the error message. For example, if you get an error
+message that contains @code{Using password: NO}, this means that you
+tried to log in without a password.
@item
If you get the following error when you try to connect from a different host
@@ -11382,6 +11447,36 @@ yourself; A source RPM is normally trivial to compile and install, so
normally this isn't a big problem.
@item
+If you get an error message where the hostname is not shown or where the
+hostname is an IP address, even if you try to connect with a hostname:
+
+@example
+shell> mysqladmin -u root -pxxxx -h some-hostname ver
+Access denied for user: 'root@' (Using password: YES)
+@end example
+
+This means that @strong{MySQL} got an error when trying to resolve the
+IP address to a hostname. In this case you can execute @code{mysqladmin
+flush-hosts} to reset the internal DNS cache. Some permanent solutions
+are:
+
+@itemize @bullet
+@item
+Try to find out what is wrong with your DNS server and fix this.
+@item
+Specify IP addresses instead of hostnames in the @code{MySQL} privilege tables.
+@item
+Start mysqld with @code{--skip-name-resolve}.
+@item
+Start mysqld with @code{--skip-host-cache}.
+@item
+Connect to @code{localhost} if you are running the server and the client
+on the same machine.
+@item
+Put the client machine names in @code{/etc/hosts}.
+@end itemize
+
+@item
If @code{mysql -u root test} works but @code{mysql -h your_hostname -u root
test} results in @code{Access denied}, then you may not have the correct name
for your host in the @code{user} table. A common problem here is that the
@@ -11457,6 +11552,37 @@ sure you haven't specified an old password in any of your option files!
@xref{Option files}.
@item
+If you make changes to the grant tables directly (using @code{INSERT} or
+@code{UPDATE} statements) and your changes seem to be ignored, remember
+that you must issue a @code{FLUSH PRIVILEGES} statement or execute a
+@code{mysqladmin flush-privileges} command to cause the server to reread
+the privilege tables. Otherwise your changes have no effect until the
+next time the server is restarted. Remember that after you set the
+@code{root} password with an @code{UPDATE} command, you won't need to
+specify it until after you flush the privileges, because the server
+still won't know you've changed the password yet!
+
+@item
+If you have access problems with a Perl, PHP, Python or ODBC program, try to
+connect to the server with @code{mysql -u user_name db_name} or @code{mysql
+-u user_name -pyour_pass db_name}. If you are able to connect using the
+@code{mysql} client, there is a problem with your program and not with the
+access privileges. (Notice that there is no space between @code{-p} and the
+password; you can also use the @code{--password=your_pass} syntax to specify
+the password.)
+
+@item
+For testing, start the @code{mysqld} daemon with the
+@code{--skip-grant-tables} option. Then you can change the @strong{MySQL}
+grant tables and use the @code{mysqlaccess} script to check whether or not
+your modifications have the desired effect. When you are satisfied with your
+changes, execute @code{mysqladmin flush-privileges} to tell the @code{mysqld}
+server to start using the new grant tables. @strong{Note:} Reloading the
+grant tables overrides the @code{--skip-grant-tables} option. This allows
+you to tell the server to begin using the grant tables again without bringing
+it down and restarting it.
+
+@item
If everything else fails, start the @code{mysqld} daemon with a debugging
option (for example, @code{--debug=d,general,query}). This will print host and
user information about attempted connections, as well as information about
@@ -15288,6 +15414,17 @@ a way that it is equivalent to @code{"1:10" MINUTE_SECOND}. This is
analogous to the way that @strong{MySQL} interprets @code{TIME} values
as representing elapsed time rather than as time of day.
+Note that if you add a date value to, or subtract it from, something that
+contains a time part, the date value will automatically be converted to a
+datetime value:
+
+@example
+mysql> select date_add("1999-01-01", interval 1 day);
+ -> 1999-01-02
+mysql> select date_add("1999-01-01", interval 1 hour);
+ -> 1999-01-01 01:00:00
+@end example
+
If you use really incorrect dates, the result is @code{NULL}. If you add
@code{MONTH}, @code{YEAR_MONTH} or @code{YEAR} and the resulting date
has a day that is larger than the maximum day for the new month, the day is
@@ -15836,7 +15973,7 @@ mysql> select student_name, AVG(test_score)
@itemx MAX(expr)
Returns the minimum or maximum value of @code{expr}. @code{MIN()} and
@code{MAX()} may take a string argument; in such cases they return the
-minimum or maximum string value.
+minimum or maximum string value. @xref{MySQL indexes}.
@example
mysql> select student_name, MIN(test_score), MAX(test_score)
@@ -16533,6 +16670,11 @@ index exists, it drops the first @code{UNIQUE} index in the table.
if no @code{PRIMARY KEY} was specified explicitly.)
@item
+If you are doing an @code{ALTER TABLE} on a @code{MyISAM} table, all
+non-unique indexes are created in a separate batch (like in @code{REPAIR}).
+This should make @code{ALTER TABLE} much faster when you have many indexes.
+
+@item
@findex mysql_info()
With the C API function @code{mysql_info()}, you can find out how many
records were copied, and (when @code{IGNORE} is used) how many records were
@@ -16600,7 +16742,10 @@ indexed, and also that we declare @code{c} as @code{NOT NULL}, because
indexed columns cannot be @code{NULL}.
When you add an @code{AUTO_INCREMENT} column, column values are filled in
-with sequence numbers for you automatically.
+with sequence numbers for you automatically. You can set the first
+sequence number by executing @code{SET INSERT_ID=#} before
+@code{ALTER TABLE} or using the @code{AUTO_INCREMENT = #} table option.
+@xref{SET OPTION}.
See also @xref{ALTER TABLE problems, , @code{ALTER TABLE} problems}.
@@ -17491,6 +17636,11 @@ If you load data from a local file using the @code{LOCAL} keyword, the server
has no way to stop transmission of the file in the middle of the operation,
so the default bahavior is the same as if @code{IGNORE} is specified.
+If you are using @code{LOAD DATA INFILE} on an empty @code{MyISAM} table,
+all non-unique indexes are created in a separate batch (like in @code{REPAIR}).
+This normally makes @code{LOAD DATA INFILE} much faster when you have many
+indexes.
+
@code{LOAD DATA INFILE} is the complement of @code{SELECT ... INTO OUTFILE}.
@xref{SELECT, , @code{SELECT}}.
To write data from a database to a file, use @code{SELECT ... INTO OUTFILE}.
@@ -18613,7 +18763,7 @@ closing it. See also @code{interactive_timeout}.
@end table
The manual section that describes tuning @strong{MySQL} contains some
-information of how to tune the above variables.
+information on how to tune the above variables. @xref{Server parameters}.
@findex Threads
@findex PROCESSLIST
@@ -19259,8 +19409,9 @@ the update log when you use @code{LAST_INSERT_ID()} in a command that updates
a table.
@item INSERT_ID = #
-Set the value to be used by the following @code{INSERT} command when inserting
-an @code{AUTO_INCREMENT} value. This is mainly used with the update log.
+Set the value to be used by the following @code{INSERT} or @code{ALTER TABLE}
+command when inserting an @code{AUTO_INCREMENT} value. This is mainly used
+with the update log.
@end table
@findex GRANT
@@ -20246,7 +20397,7 @@ The following options to @code{mysqld} can be used to change the behavour of
BDB tables:
@multitable @columnfractions .30 .70
-@item --bdb-home= directory @tab Berkeley home direcory
+@item --bdb-home= directory @tab Base directory for BDB tables. This should be the same directory you use for --datadir.
@item --bdb-lock-detect=# @tab Berkeley lock detect. One of (DEFAULT, OLDEST, RANDOM or YOUNGEST)
@item --bdb-logdir=directory @tab Berkeley DB log file directory
@item --bdb-nosync @tab Don't synchronously flush logs
@@ -23442,288 +23593,12 @@ values it actually is using for the variables by executing this command:
@example
shell> mysqladmin variables
@end example
-<<<<<<< manual.texi
-
-Each option is described below. Values for buffer sizes, lengths and stack
-sizes are given in bytes. You can specify values with a suffix of @samp{K}
-or @samp{M} to indicate kilobytes or megabytes. For example, @code{16M}
-indicates 16 megabytes. The case of suffix letters does not matter;
-@code{16M} and @code{16m} are equivalent.
-
-You can also see some statistics from a running server by issuing the command
-@code{SHOW STATUS}. @xref{SHOW}.
-
-@table @code
-@item @code{ansi_mode}.
-Is @code{ON} if @code{mysqld} was started with @code{--ansi}.
-@xref{Ansi mode}.
-
-@item @code{back_log}
-The number of outstanding connection requests @strong{MySQL} can have. This
-comes into play when the main @strong{MySQL} thread gets @strong{VERY}
-many connection requests in a very short time. It then takes some time
-(although very little) for the main thread to check the connection and start
-a new thread. The @code{back_log} value indicates how many requests can be
-stacked during this short time before @strong{MySQL} momentarily stops
-answering new requests. You need to increase this only if you expect a large
-number of connections in a short period of time.
-
-In other words, this value is the size of the listen queue for incoming
-TCP/IP connections. Your operating system has its own limit on the size
-of this queue. The manual page for the Unix @code{listen(2)} system
-call should have more details. Check your OS documentation for the
-maximum value for this variable. Attempting to set @code{back_log}
-higher than your operating system limit will be ineffective.
-
-@item @code{bdb_cache_size}
-The buffer that is allocated to cache index and rows for @code{BDB} tables.
-If you don't use @code{BDB} tables, you should set this to 0 or
-start @code{mysqld} with @code{--skip-bdb} o not waste memory for this cache.
-
-@item @code{concurrent_inserts}
-If @code{ON} (the default), @code{MySQL} will allow you to use @code{INSERT}
-on @code{MyISAM} tables at the same time as you run @code{SELECT} queries
-on them. You can turn this option off by starting mysqld with @code{--safe}
-or @code{--skip-new}.
-
-@item @code{connect_timeout}
-The number of seconds the @code{mysqld} server is waiting for a connect
-packet before responding with @code{Bad handshake}.
-
-@item @code{delayed_insert_timeout}
-How long a @code{INSERT DELAYED} thread should wait for @code{INSERT}
-statements before terminating.
-
-@item @code{delayed_insert_limit}
-After inserting @code{delayed_insert_limit} rows, the @code{INSERT
-DELAYED} handler will check if there are any @code{SELECT} statements
-pending. If so, it allows these to execute before continuing.
-
-@item @code{delay_key_write}
-If enabled (is on by default), @strong{MySQL} will honor the
-@code{delay_key_write} option @code{CREATE TABLE}. This means that the
-key buffer for tables with this option will not get flushed on every
-index update, but only when a table is closed. This will speed up
-writes on keys a lot but you should add automatic checking of all tables
-with @code{myisamchk --fast --force} if you use this. Note that if you
-start @code{mysqld} with the @code{--delay-key-write-for-all-tables}
-option this means that all tables will be treated as if they were
-created with the @code{delay_key_write} option. You can clear this flag
-by starting @code{mysqld} with @code{--skip-new} or @code{--safe-mode}.
-
-@item @code{delayed_queue_size}
-What size queue (in rows) should be allocated for handling @code{INSERT
-DELAYED}. If the queue becomes full, any client that does @code{INSERT
-DELAYED} will wait until there is room in the queue again.
-
-@item @code{flush_time}
-If this is set to a non-zero value, then every @code{flush_time} seconds all
-tables will be closed (to free up resources and sync things to disk).
-
-@item @code{init_file}
-The name of the file specified with the @code{--init-file} option when
-you start the server. This is a file of SQL statements you want the
-server to execute when it starts.
-
-@item @code{interactive_timeout}
-The number of seconds the server waits for activity on an interactive
-connection before closing it. An interactive client is defined as a
-client that uses the @code{CLIENT_INTERACTIVE} option to
-@code{mysql_real_connect()}. See also @code{wait_timeout}.
-
-@item @code{join_buffer_size}
-The size of the buffer that is used for full joins (joins that do not
-use indexes). The buffer is allocated one time for each full join
-between two tables. Increase this value to get a faster full join when
-adding indexes is not possible. (Normally the best way to get fast joins
-is to add indexes.)
-
-@c Make texi2html support index @anchor{Index cache size}. Then change
-@c some xrefs to point here
-@item @code{key_buffer_size}
-Index blocks are buffered and are shared by all threads.
-@code{key_buffer_size} is the size of the buffer used for index blocks.
-
-Increase this to get better index handling (for all reads and multiple
-writes) to as much as you can afford; 64M on a 256M machine that mainly
-runs @strong{MySQL} is quite common. If you however make this too big
-(more than 50% of your total memory?) your system may start to page
-and become REALLY slow. Remember that because @strong{MySQL} does not cache data
-read, that you will have to leave some room for the OS filesystem cache.
-You can check the performance of the key buffer by doing @code{show
-status} and examine the variables @code{Key_read_requests},
-@code{Key_reads}, @code{Key_write_requests} and @code{Key_writes}. The
-@code{Key_reads/Key_read_request} ratio should normally be < 0.01.
-The @code{Key_write/Key_write_requests} is usually near 1 if you are
-using mostly updates/deletes but may be much smaller if you tend to
-do updates that affect many at the same time or if you are
-using @code{delay_key_write}. @xref{SHOW}.
-
-To get even more speed when writing many rows at the same time use
-@code{LOCK TABLES}. @xref{LOCK TABLES, , @code{LOCK TABLES}}.
-
-@item @code{lower_case_table_names}
-Change all table names to lower case on disk.
-
-@item @code{long_query_time}
-If a query takes longer than this (in seconds), the @code{Slow_queries} counter
-will be incremented.
-
-@item @code{max_allowed_packet}
-The maximum size of one packet. The message buffer is initialized to
-@code{net_buffer_length} bytes, but can grow up to @code{max_allowed_packet}
-bytes when needed. This value by default is small, to catch big (possibly
-wrong) packets. You must increase this value if you are using big
-@code{BLOB} columns. It should be as big as the biggest @code{BLOB} you want
-to use.
-
-@item @code{max_connections}
-The number of simultaneous clients allowed. Increasing this value increases
-the number of file descriptors that @code{mysqld} requires. See below for
-comments on file descriptor limits. @xref{Too many connections}.
-
-@item @code{max_connect_errors}
-If there is more than this number of interrupted connections from a host
-this host will be blocked from further connections. You can unblock a host
-with the command @code{FLUSH HOSTS}.
-
-@item @code{max_delayed_threads}
-Don't start more than this number of threads to handle @code{INSERT DELAYED}
-statements. If you try to insert data into a new table after all @code{INSERT
-DELAYED} threads are in use, the row will be inserted as if the
-@code{DELAYED} attribute wasn't specified.
-
-@item @code{max_join_size}
-Joins that are probably going to read more than @code{max_join_size}
-records return an error. Set this value if your users tend to perform joins
-without a @code{WHERE} clause that take a long time and return
-millions of rows.
-
-@item @code{max_heap_table_size}
-Don't allow creation of heap tables bigger than this.
-
-@item @code{max_sort_length}
-The number of bytes to use when sorting @code{BLOB} or @code{TEXT}
-values (only the first @code{max_sort_length} bytes of each value
-are used; the rest are ignored).
-
-@item @code{max_tmp_tables}
-(This option doesn't yet do anything).
-Maximum number of temporary tables a client can keep open at the same time.
-
-@item @code{max_write_lock_count}
-After this many write locks, allow some read locks to run in between.
-
-@item @code{myisam_sort_buffer_size}
-The buffer that is allocated when sorting the index when doing a @code{REPAIR}
-table.
-
-@item @code{net_buffer_length}
-The communication buffer is reset to this size between queries. This
-should not normally be changed, but if you have very little memory, you
-can set it to the expected size of a query. (That is, the expected length of
-SQL statements sent by clients. If statements exceed this length, the buffer
-is automatically enlarged, up to @code{max_allowed_packet} bytes.)
-
-@item @code{net_retry_count}
-If a read on a communication port is interrupted, retry this many times
-before giving up. This value should be quite high on @code{FreeBSD} as
-internal interrupts is sent to all threads.
-
-@item @code{net_read_timeout}
-Number of seconds to wait for more data from a connection before aborting
-the read. Note that when we don't expect data from a connection, the timeout
-is defined by @code{write_timeout}.
-
-@item @code{net_write_timeout}
-Number of seconds to wait for a block to be written to a connection before
-aborting the write.
-=======
->>>>>>> 1.148
-
-<<<<<<< manual.texi
-@item @code{record_buffer}
-Each thread that does a sequential scan allocates a buffer of this
-size for each table it scans. If you do many sequential scans, you may
-want to increase this value.
-=======
-or the @code{SHOW VARIABLES} in the @code{mysql} command client.
->>>>>>> 1.148
-
-<<<<<<< manual.texi
-@item @code{query_buffer_size}
-The initial allocation of the query buffer. If most of your queries are
-long (like when inserting blobs), you should increase this!
-=======
You can find a full description for all variables in the @code{SHOW VARIABLES}
section in this manual. @xref{SHOW VARIABLES}.
->>>>>>> 1.148
-<<<<<<< manual.texi
-@item @code{skip_show_databases}
-This prevents people from doing @code{SHOW DATABASES}, if they don't
-have the @code{PROCESS_PRIV} privilege. This can improve security if
-you're concerned about people being able to see what databases and
-tables other users have.
-
-@item @code{slow_launch_time}
-If the creating of the thread longer than this (in seconds), the
-@code{Slow_launch_threads} counter will be incremented.
-
-@item @code{sort_buffer}
-Each thread that needs to do a sort allocates a buffer of this
-size. Increase this value for faster @code{ORDER BY} or @code{GROUP BY}
-operations.
-@xref{Temporary files}.
-
-@item @code{table_cache}
-The number of open tables for all threads. Increasing this value
-increases the number of file descriptors that @code{mysqld} requires.
-@strong{MySQL} needs two file descriptors for each unique open table.
-See below for comments on file descriptor limits. You can check if you
-need to increase the table cache by checking the @code{Opened_tables}
-variable. @xref{SHOW}. If this variable is big and you don't do
-@code{FLUSH TABLES} a lot (which just forces all tables to be closed and
-reopenend), then you should increase the value of this variable.
-
-For information about how the table cache works, see @ref{Table cache}.
-
-@item @code{thread_cache_size}
-How many threads we should keep keep in a cache for reuse. When a
-client disconnects the clients threads is put in the cache if there
-isn't more than @code{thread_cache_size} threads from before. All new
-threads are first taken from the cache and only when the cache is empty
-a new thread is created. This variable can be increased to improve
-performance if you have a lot of new connections (Normally this doesn't
-however give a notable performance improvement if you have a good
-thread implementation).
-
-@item @code{thread_concurrency}
-On Solaris, @code{mysqld} will call @code{thr_setconcurrency()} with
-this value. @code{thr_setconcurrency()} permits the application to give
-the threads system a hint, for the desired number of threads that should
-be run at the same time.
-
-@item @code{thread_stack}
-The stack size for each thread. Many of the limits detected by the
-@code{crash-me} test are dependent on this value. The default is
-large enough for normal operation. @xref{Benchmarks}.
-
-@item @code{tmp_table_size}
-If an in-memory temporary table exceeds this size, @strong{MySQL}
-will automatically convert it to an on-disk @code{MyISAM} table.
-Increase the value of @code{tmp_table_size} if you do many advanced
-@code{GROUP BY} queries and you have lots of memory.
-
-@item @code{wait_timeout}
-The number of seconds the server waits for activity on a connection before
-closing it. See also @code{interactive_timeout}.
-@end table
-=======
You can also see some statistics from a running server by issuing the command
@code{SHOW STATUS}. @xref{SHOW STATUS}.
->>>>>>> 1.148
@strong{MySQL} uses algorithms that are very scalable, so you can usually
run with very little memory. If you however give @strong{MySQL} more
@@ -23941,7 +23816,8 @@ concurrently-running thread. For each concurrent thread, a table structure,
column structures for each column, and a buffer of size @code{3 * n} is
allocated (where @code{n} is the maximum row length, not counting @code{BLOB}
columns). A @code{BLOB} uses 5 to 8 bytes plus the length of the @code{BLOB}
-data.
+data. The @code{ISAM}/@code{MyISAM} table handlers will use one extra row
+buffer for internal usage.
@item
For each table having @code{BLOB} columns, a buffer is enlarged dynamically
@@ -24208,7 +24084,12 @@ Retrieve rows from other tables when performing joins.
@item
Find the @code{MAX()} or @code{MIN()} value for a specific indexed
-column.
+column. This is optimized by a pre-processor that checks if you are
+using @code{WHERE} key_part_# = constant on all key parts < N. In this case
+@strong{MySQL} will do a single key lookup and replace the @code{MIN()}
+expression with a constant. If all expressions are replaced with
+constants, the query will return at once.
+
@example
SELECT MIN(key_part2),MAX(key_part2) FROM table_name where key_part1=10
@end example
@@ -25664,6 +25545,10 @@ Note that if you run @code{mysqldump} without @code{--quick} or
memory before dumping the result. This will probably be a problem if
you are dumping a big database.
+Note that if you are using a new copy of the @code{mysqldump} program
+and are going to do a dump that will be read into a very old @code{MySQL}
+server, you should not use the @code{--opt} or @code{-e} options.
+
@code{mysqldump} supports the following options:
@table @code
@@ -28140,6 +28025,12 @@ some user that it works:
To make Access work:
@itemize @bullet
@item
+
+If you are using Access 2000, you should get and install Microsoft MDAC from
+@uref{http://www.microsoft.com/data/download_21242023.htm}. This will
+fix the bug in Access where the table and column names weren't specified
+when you export data to @strong{MySQL}.
+@item
You should have a primary key in the table.
@item
You should have a timestamp in all tables you want to be able to update.
@@ -34769,7 +34660,6 @@ dictionary, configuration files and templates, and allows "pre-processing"
and "post-processing" on both fields, records and operations.
@end itemize
-
@appendixsec Web tools
@itemize @bullet
@@ -34971,6 +34861,12 @@ Patches for @code{radiusd} to make it support @strong{MySQL}. By Wim Bonis,
@appendixsec Useful tools
@itemize @bullet
+@item @uref{http://www.mysql.com/Downloads/Contrib/mytop, mytop}
+@item @uref{http://public.yahoo.com/~jzawodn/mytop/, mytop home page}
+mytop is a Perl program which allows you to monitor MySQL servers by
+viewing active threads, queries and overall server performance
+numbers. By Jeremy D. Zawodny.
+
@item @uref{http://www.mysql.com/Downloads/Contrib/mysql_watchdog.pl, mysql_watchdog.pl}
Monitor the @strong{MySQL} daemon for possible lockups. By Yermo Lamers,
@email{yml@@yml.com}.
@@ -35004,6 +34900,10 @@ By Daniel Koch.
@item @uref{http://www.mysql.com/Downloads/Contrib/dbcheck, dbcheck}
Perl script that takes a backup of a tables before running isamchk on them.
By Elizabeth.
+
+@item @uref{http://www.mysql.com/Downloads/Contrib/mybackup}
+@item @uref{http://www.mswanson.com/mybackup, mybackup home page}
+Wrapper for mysqldump to back up all databases. By "Marc Swanson".
@end itemize
@appendixsec RPMs for common tools (Most are for RedHat 6.1)
@@ -35338,6 +35238,9 @@ Simple billing/license/support/copyright issues.
ODBC and VisualC++ interface questions.
@item Randy Harmon @email{rjharmon@@uptimecomputers.com}
@code{DBD}, Linux, some SQL syntax questions.
+@item Konark IA-64 Centre of Persistent Systems Private Limited
+@uref{http://www.pspl.co.in/konark/}. Help with the Win64 port of the
+@strong{MySQL} server.
@end table
@node News, Bugs, Credits, Top
@@ -35376,6 +35279,7 @@ old code in @strong{MySQL} 3.23 so this version should stabilise pretty soon
and will soon be declared beta, gamma and release.
@menu
+* News-3.23.23:: Changes in release 3.23.23
* News-3.23.22:: Changes in release 3.23.22
* News-3.23.21:: Changes in release 3.23.21
* News-3.23.20:: Changes in release 3.23.20
@@ -35401,7 +35305,38 @@ and will soon be declared beta, gamma and release.
* News-3.23.0:: Changes in release 3.23.0
@end menu
-@node News-3.23.22, News-3.23.21, News-3.23.x, News-3.23.x
+@node News-3.23.23, News-3.23.22, News-3.23.x, News-3.23.x
+@appendixsubsec Changes in release 3.23.23
+@itemize @bullet
+@item
+Changed @code{ALTER TABLE} to create non-unique indexes in a separate batch
+(which should make @code{ALTER TABLE} much faster when you have many indexes)
+@item
+Added delayed index handling to @code{LOAD DATA INFILE} when you are
+reading into an empty table.
+@item
+@code{ALTER TABLE} now logs the first used insert_id correctly.
+@item
+Fixed crash when adding a default value to a @code{BLOB} column.
+@item
+Fixed a bug with @code{DATE_ADD/DATE_SUB} where it returned a datetime
+instead of a date.
+@item
+Fixed a problem with the thread cache which made some threads show up as
+@code{***DEAD***} in @code{SHOW PROCESSLIST}.
+@item
+Fixed a lock in our thr_rwlock code, which could make selects that run
+at the same time as concurrent inserts crash. This only affects systems
+that don't have the @code{pthread_rwlock_rdlock} code.
+@item
+When deleting rows with a non-unique key in a HEAP table, all rows weren't
+always deleted.
+@item
+Fixed so that BDB tables work on part keys.
+@end itemize
+
+@node News-3.23.22, News-3.23.21, News-3.23.23, News-3.23.x
@appendixsubsec Changes in release 3.23.22
@itemize @bullet
@item
@@ -35433,9 +35368,12 @@ Added @code{ANALYZE table_name} to update key statistics for tables.
@item
Changed binary items @code{0x...} to be default regarded as an integer
@item
-Fix for SCO and @code{show proesslist}.
+Fix for SCO and @code{SHOW PROCESSLIST}.
@item
Added @code{auto-rehash} on reconnect for the @code{mysql} client.
+@item
+Fixed a newly introduced bug in @code{MyISAM}, where the index file couldn't
+get bigger than 64M.
@end itemize
@node News-3.23.21, News-3.23.20, News-3.23.22, News-3.23.x
@@ -39316,28 +39254,6 @@ Fixed @code{DISTINCT} with calculated columns.
@node Bugs, TODO, News, Top
@appendix Known errors and design deficiencies in MySQL
-The following is known bugs in @strong{MySQL} 3.23.22 that will be fixed
-in the next release:
-@itemize @bullet
-@item
-Searching on part keys on BDB tables doesn't return all rows:
-
-@example
-CREATE TABLE t1 (
- user_id int(10) DEFAULT '0' NOT NULL,
- name varchar(100),
- phone varchar(100),
- ref_email varchar(100) DEFAULT '' NOT NULL,
- detail varchar(200),
- PRIMARY KEY (user_id,ref_email)
-)type=bdb;
-INSERT INTO t1 VALUES (10292,'sanjeev','29153373','sansh777.hotmail.com','xxx'),(10292,'shirish','2333604','shirish.yahoo.com','ddsds'),(10292,'sonali','323232','sonali.bolly.com','filmstar');
-select * from t1 where user_id=10292;
-@end example
-@end itemize
-
-Other known problems:
-
@itemize @bullet
@item
You cannot build in another directory when using
@@ -39496,6 +39412,10 @@ Fail safe replication.
@item
Optimize, test and document transactions safe tables
@item
+@code{RENAME table as table, table as table [,...]}
+@item
+Allow users to change startup options.
+@item
Subqueries. @code{select id from t where grp in (select grp from g where u > 100)}
@item
Port of @strong{MySQL} to BeOS.
@@ -39506,14 +39426,13 @@ can gracefully recover if the index file gets full.
If you perform an @code{ALTER TABLE} on a table that is symlinked to another
disk, create temporary tables on this disk.
@item
-@code{RENAME table as table, table as table [,...]}
-@item
-Allow users to change startup options.
-@item
FreeBSD and MIT-pthreads; Do sleeping threads take CPU?
@item
Check if locked threads take any CPU.
@item
+Fix configure so that one can compile all libraries (like @code{MyISAM})
+without threads.
+@item
Change to use mkstemp() instead of tempnam() for system that supports the call.
We need to add a my_mkstemp() function in mysys and also change the cache
code to not create the filename until we do the actual open.
@@ -39568,6 +39487,10 @@ LOAD DATA INFILE 'file_name.txt' INTO TABLE tbl_name
TEXT_FIELDS (text_field1, text_field2, text_field3)
SET table_field1=concatenate(text_field1, text_field2), table_field3=23
IGNORE text_field3
+
+This can be used to skip over extra columns in the text file, or to update
+columns based on expressions of the read data...
@end example
@item
@code{LOAD DATA INFILE 'file_name' INTO TABLE 'table_name' ERRORS TO err_table_name}
@@ -39581,9 +39504,6 @@ and maybe
data_line - the line from the data file
@end example
@item
-We should extend @code{LOAD DATA INFILE} so that we can skip over extra
-columns in the text file.
-@item
Add true @code{VARCHAR} support (There is already support for this in MyISAM).
@item
Automatic output from @code{mysql} to netscape.
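Several of the manual.texi additions above repeat one point: the stored 'scrambled' password is itself the real credential, because authentication only needs values derived from that stored hash. The toy program below is a generic illustration of why that is, assuming a simple challenge/response check; the toy_hash and toy_response functions are made up for the example and are not @strong{MySQL}'s actual scrambling or wire protocol.

/* Toy challenge/response illustration of why a stored password hash that is
   used directly for authentication is itself a secret.  The "hash" and
   "response" functions here are made-up stand-ins, NOT MySQL's algorithm. */
#include <stdio.h>

/* A deliberately trivial stand-in hash (do not use for anything real). */
static unsigned long toy_hash(const char *s)
{
  unsigned long h = 5381;
  while (*s)
    h = h * 33 + (unsigned char) *s++;
  return h;
}

/* Both sides compute the same response from the *stored hash* and a nonce. */
static unsigned long toy_response(unsigned long stored_hash, unsigned long nonce)
{
  return stored_hash ^ (nonce * 2654435761UL);
}

int main(void)
{
  const char   *password    = "secret";
  unsigned long stored_hash = toy_hash(password);   /* what the server keeps */
  unsigned long nonce       = 0x12345678UL;         /* server challenge */

  /* A legitimate client derives the hash from the password it knows ...   */
  unsigned long client_resp = toy_response(toy_hash(password), nonce);
  /* ... but anyone who has read the stored hash can answer just as well.  */
  unsigned long thief_resp  = toy_response(stored_hash, nonce);

  printf("client accepted: %d\n", client_resp == toy_response(stored_hash, nonce));
  printf("thief accepted:  %d\n", thief_resp  == toy_response(stored_hash, nonce));
  return 0;
}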
diff --git a/heap/hp_rnext.c b/heap/hp_rnext.c
index 1b775136655..6aa3cf06d97 100644
--- a/heap/hp_rnext.c
+++ b/heap/hp_rnext.c
@@ -37,7 +37,7 @@ int heap_rnext(HP_INFO *info, byte *record)
pos=0; /* Read next after last */
my_errno=HA_ERR_KEY_NOT_FOUND;
}
- else if (!info->current_ptr && (info->update & HA_STATE_PREV_FOUND))
+ else if (!info->current_ptr) /* Deleted or first call */
pos= _hp_search(info,share->keydef+info->lastinx, info->lastkey, 0);
else
pos= _hp_search(info,share->keydef+info->lastinx, info->lastkey, 1);
diff --git a/include/config-win.h b/include/config-win.h
index 3a541e9ca88..399b28bbe24 100644
--- a/include/config-win.h
+++ b/include/config-win.h
@@ -85,9 +85,6 @@
typedef unsigned short ushort;
typedef unsigned int uint;
-#ifndef _WIN64
-typedef unsigned int size_t;
-#endif
typedef unsigned __int64 ulonglong; /* Microsofts 64 bit types */
typedef __int64 longlong;
typedef int sigset_t;
@@ -96,6 +93,12 @@ typedef int sigset_t;
Use my_off_t or os_off_t instead */
typedef long off_t;
typedef __int64 os_off_t;
+#ifdef _WIN64
+typedef UINT_PTR rf_SetTimer;
+#else
+typedef unsigned int size_t;
+typedef uint rf_SetTimer;
+#endif
#define Socket_defined
#define my_socket SOCKET
@@ -288,4 +291,3 @@ inline double ulonglong2double(ulonglong value)
#define statistic_add(V,C,L) (V)+=(C)
#endif
#define statistic_increment(V,L) thread_safe_increment((V),(L))
-
diff --git a/include/global.h b/include/global.h
index bb7ee1b6cba..c56b1f83dc3 100644
--- a/include/global.h
+++ b/include/global.h
@@ -572,7 +572,9 @@ typedef long longlong;
#endif
#undef SIZEOF_OFF_T
#define SIZEOF_OFF_T 8
-#endif
+#else
+#define SYSTEM_SIZEOF_OFF_T SIZEOF_OFF_T
+#endif /* USE_RAID */
#if SIZEOF_OFF_T > 4
typedef ulonglong my_off_t;
diff --git a/include/my_base.h b/include/my_base.h
index 429c7132444..5b16e79798d 100644
--- a/include/my_base.h
+++ b/include/my_base.h
@@ -262,4 +262,11 @@ typedef ulong ha_rows;
#define HA_POS_ERROR (~ (ha_rows) 0)
#define HA_OFFSET_ERROR (~ (my_off_t) 0)
+
+#if SYSTEM_SIZEOF_OFF_T == 4
+#define MAX_FILE_SIZE INT_MAX32
+#else
+#define MAX_FILE_SIZE LONGLONG_MAX
+#endif
+
#endif /* _my_base_h */
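The new @code{MAX_FILE_SIZE} define above selects a file-size limit at compile time based on @code{SYSTEM_SIZEOF_OFF_T}, which the include/global.h hunk records from the system's @code{off_t} size. Below is a minimal stand-alone sketch of the same preprocessor pattern; the standard limits.h constants stand in for the MySQL-internal @code{INT_MAX32}/@code{LONGLONG_MAX}, and the size is hard-coded where configure would normally supply it.

/* Sketch of the compile-time selection pattern used above: remember the
   native off_t size, then derive a file-size cap from it.  Values here are
   assumptions for illustration only. */
#include <stdio.h>
#include <limits.h>

#define SYSTEM_SIZEOF_OFF_T 4              /* pretend this came from configure */

#if SYSTEM_SIZEOF_OFF_T == 4
#define MAX_FILE_SIZE INT_MAX              /* ~2GB cap with a 32-bit off_t */
#else
#define MAX_FILE_SIZE LLONG_MAX            /* effectively unlimited otherwise */
#endif

int main(void)
{
  printf("MAX_FILE_SIZE = %lld\n", (long long) MAX_FILE_SIZE);
  return 0;
}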
diff --git a/include/myisam.h b/include/myisam.h
index 2b8f75b2050..a2a17636b1d 100644
--- a/include/myisam.h
+++ b/include/myisam.h
@@ -43,6 +43,8 @@ extern "C" {
#define MI_MAX_MSG_BUF 1024 /* used in CHECK TABLE, REPAIR TABLE */
#define MI_NAME_IEXT ".MYI"
#define MI_NAME_DEXT ".MYD"
+/* Max extra space to use when sorting keys */
+#define MI_MAX_TEMP_LENGTH 1024L*1024L*1024L
#define mi_portable_sizeof_char_ptr 8
@@ -263,9 +265,13 @@ extern uint mi_get_pointer_length(ulonglong file_length, uint def);
#define T_MEDIUM T_READONLY*2
#define T_AUTO_INC T_MEDIUM*2
#define T_CHECK T_AUTO_INC*2
-#define T_UPDATE_STATE T_CHECK*2
-#define T_CHECK_ONLY_CHANGED T_UPDATE_STATE*2
-#define T_DONT_CHECK_CHECKSUM T_CHECK_ONLY_CHANGED*2
+#define T_UPDATE_STATE T_CHECK*2
+#define T_CHECK_ONLY_CHANGED T_UPDATE_STATE*2
+#define T_DONT_CHECK_CHECKSUM T_CHECK_ONLY_CHANGED*2
+#define T_TRUST_HEADER T_DONT_CHECK_CHECKSUM*2
+#define T_CREATE_MISSING_KEYS T_TRUST_HEADER*2
+#define T_SAFE_REPAIR T_CREATE_MISSING_KEYS*2
+#define T_AUTO_REPAIR T_SAFE_REPAIR*2
#define O_NEW_INDEX 1 /* Bits set in out_flag */
#define O_NEW_DATA 2
@@ -357,8 +363,8 @@ int lock_file(MI_CHECK *param, File file, my_off_t start, int lock_type,
const char *filetype, const char *filename);
void lock_memory(MI_CHECK *param);
int flush_blocks(MI_CHECK *param, File file);
- void update_auto_increment_key(MI_CHECK *param, MI_INFO *info,
- my_bool repair);
+void update_auto_increment_key(MI_CHECK *param, MI_INFO *info,
+ my_bool repair);
int update_state_info(MI_CHECK *param, MI_INFO *info,uint update);
int filecopy(MI_CHECK *param, File to,File from,my_off_t start,
my_off_t length, const char *type);
@@ -370,6 +376,8 @@ int _create_index_by_sort(MI_SORT_PARAM *info,my_bool no_messages,
ulong);
int test_if_almost_full(MI_INFO *info);
int recreate_table(MI_CHECK *param, MI_INFO **org_info, char *filename);
+void mi_dectivate_non_unique_index(MI_INFO *info, ha_rows rows);
+my_bool mi_test_if_sort_rep(MI_INFO *info, ha_rows rows);
#ifdef __cplusplus
}
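The new @code{T_TRUST_HEADER}, @code{T_CREATE_MISSING_KEYS}, @code{T_SAFE_REPAIR} and @code{T_AUTO_REPAIR} defines above extend the existing "multiply the previous flag by 2" convention, which yields successive single-bit values that can be OR-combined into one flags word. A tiny stand-alone sketch of that pattern (the OPT_* names are invented for the example):

/* Each define is twice the previous one, so every flag occupies its own bit
   and options can be combined with | and tested with &. */
#include <stdio.h>

#define OPT_QUICK     1
#define OPT_MEDIUM    (OPT_QUICK * 2)      /* 2 */
#define OPT_AUTO_INC  (OPT_MEDIUM * 2)     /* 4 */
#define OPT_SAFE      (OPT_AUTO_INC * 2)   /* 8 -- each define is a new bit */

int main(void)
{
  unsigned int flags = OPT_QUICK | OPT_SAFE;     /* combine options */

  if (flags & OPT_SAFE)                          /* test one option */
    printf("safe mode requested (flags = 0x%x)\n", flags);
  return 0;
}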
diff --git a/isam/_dynrec.c b/isam/_dynrec.c
index 6d4a491304c..f26e491a58c 100644
--- a/isam/_dynrec.c
+++ b/isam/_dynrec.c
@@ -1,15 +1,15 @@
/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
-
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
-
+
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
+
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
@@ -63,8 +63,8 @@ int _nisam_write_blob_record(N_INFO *info, const byte *record)
extra=ALIGN_SIZE(MAX_DYN_BLOCK_HEADER)+N_SPLITT_LENGTH+
DYN_DELETE_BLOCK_HEADER;
if (!(rec_buff=(byte*) my_alloca(info->s->base.pack_reclength+
- _calc_total_blob_length(info,record)+
- extra)))
+ _calc_total_blob_length(info,record)+
+ extra)))
return(-1);
reclength=_nisam_rec_pack(info,rec_buff+ALIGN_SIZE(MAX_DYN_BLOCK_HEADER),
record);
@@ -84,8 +84,8 @@ int _nisam_update_blob_record(N_INFO *info, ulong pos, const byte *record)
extra=ALIGN_SIZE(MAX_DYN_BLOCK_HEADER)+N_SPLITT_LENGTH+
DYN_DELETE_BLOCK_HEADER;
if (!(rec_buff=(byte*) my_alloca(info->s->base.pack_reclength+
- _calc_total_blob_length(info,record)+
- extra)))
+ _calc_total_blob_length(info,record)+
+ extra)))
return(-1);
reclength=_nisam_rec_pack(info,rec_buff+ALIGN_SIZE(MAX_DYN_BLOCK_HEADER),
record);
diff --git a/isam/_search.c b/isam/_search.c
index 3c3f62c3a2a..93a08ea2b0d 100644
--- a/isam/_search.c
+++ b/isam/_search.c
@@ -342,7 +342,7 @@ int _nisam_key_cmp(register N_KEYSEG *keyseg, register uchar *a, register uchar
else
{
if ((flag = my_strnncoll(default_charset_info,
- a, (end-a), b, b_length)))
+ a, (int) (end-a), b, b_length)))
return (keyseg->base.flag & HA_REVERSE_SORT) ? -flag : flag;
b+= (uint) (end-a);
a=end;
@@ -393,7 +393,7 @@ int _nisam_key_cmp(register N_KEYSEG *keyseg, register uchar *a, register uchar
else
{
if ((flag = my_strnncoll(default_charset_info,
- a, (end-a), b, (end-a))))
+ a, (int) (end-a), b, (int) (end-a))))
return (keyseg->base.flag & HA_REVERSE_SORT) ? -flag : flag;
b+= (uint) (end-a);
a=end;
diff --git a/merge/rrnd.c b/merge/rrnd.c
index 15a50c0b89f..0abd72a3edd 100644
--- a/merge/rrnd.c
+++ b/merge/rrnd.c
@@ -82,7 +82,7 @@ int mrg_rrnd(MRG_INFO *info,byte *buf,mrg_off_t filepos)
}
}
info->current_table=find_table(info->open_tables,
- info->last_used_table,filepos);
+ info->end_table,filepos);
isam_info=info->current_table->table;
isam_info->update&= HA_STATE_CHANGED;
return ((*isam_info->s->read_rnd)(isam_info,(byte*) buf,
diff --git a/myisam/ft_eval.c b/myisam/ft_eval.c
index b8628724642..eeb414a7505 100644
--- a/myisam/ft_eval.c
+++ b/myisam/ft_eval.c
@@ -84,7 +84,7 @@ int main(int argc,char *argv[])
for(i=1;create_record(record,qf);i++) {
FT_DOCLIST *result; double w; int t;
- result=ft_init_search(file,0,blob_record,strlen(blob_record),1);
+ result=ft_init_search(file,0,blob_record,(uint) strlen(blob_record),1);
if(!result) {
printf("Query %d failed with errno %3d\n",i,my_errno);
goto err;
@@ -177,7 +177,7 @@ int create_record(char *pos, FILE *file)
{
if(feof(file)) return 0; else print_error(1,"fgets(docid) - 1");
}
- tmp=strlen(pos+2)-1;
+ tmp=(uint) strlen(pos+2)-1;
int2store(pos,tmp);
pos+=recinfo[0].length;
@@ -185,7 +185,7 @@ int create_record(char *pos, FILE *file)
if(!(fgets(blob_record,MAX_BLOB_LENGTH,file)))
print_error(1,"fgets(docid) - 2");
- tmp=strlen(blob_record);
+ tmp=(uint) strlen(blob_record);
int4store(pos,tmp);
ptr=blob_record;
memcpy_fixed(pos+4,&ptr,sizeof(char*));
diff --git a/myisam/ft_parser.c b/myisam/ft_parser.c
index e2fcd2b00a1..588f5831dce 100644
--- a/myisam/ft_parser.c
+++ b/myisam/ft_parser.c
@@ -140,7 +140,7 @@ TREE * ft_parse(TREE *wtree, byte *doc, int doclen)
if(word_char(*doc)) break;
for(w.pos=doc; doc<end; doc++)
if(!word_char(*doc)) break;
- if((w.len=doc-w.pos) < MIN_WORD_LEN) continue;
+ if((w.len= (uint) (doc-w.pos)) < MIN_WORD_LEN) continue;
if(!tree_insert(wtree, &w, 0))
{
delete_tree(wtree);
diff --git a/myisam/ft_static.c b/myisam/ft_static.c
index 5cbcff85b54..34b9368d522 100644
--- a/myisam/ft_static.c
+++ b/myisam/ft_static.c
@@ -19,19 +19,19 @@
#include "ftdefs.h"
const MI_KEYSEG ft_keysegs[FT_SEGS]={
- {
- HA_KEYTYPE_VARTEXT, // type
- 7, // language
- 0, 0, 0, // null_bit, bit_start, bit_end
- HA_VAR_LENGTH | HA_PACK_KEY, // flag
- HA_FT_MAXLEN, // length
+{
+ HA_KEYTYPE_VARTEXT, /* type */
+ 7, /* language */
+ 0, 0, 0, /* null_bit, bit_start, bit_end */
+ HA_VAR_LENGTH | HA_PACK_KEY, /* flag */
+ HA_FT_MAXLEN, /* length */
#ifdef EVAL_RUN
- HA_FT_WLEN+1, // start
+ HA_FT_WLEN+1, /* start */
#else /* EVAL_RUN */
- HA_FT_WLEN, // start
+ HA_FT_WLEN, /* start */
#endif /* EVAL_RUN */
- 0, // null_pos
- NULL // sort_order
+ 0, /* null_pos */
+ NULL /* sort_order */
},
#ifdef EVAL_RUN
{
diff --git a/myisam/mi_check.c b/myisam/mi_check.c
index 5b5583b136e..f8667afb46a 100644
--- a/myisam/mi_check.c
+++ b/myisam/mi_check.c
@@ -220,7 +220,7 @@ static int check_k_link(MI_CHECK *param, register MI_INFO *info, uint nr)
if (next_link > info->state->key_file_length ||
next_link & (info->s->blocksize-1))
DBUG_RETURN(1);
- if (!(buff=key_cache_read(info->s->kfile, next_link, info->buff,
+ if (!(buff=key_cache_read(info->s->kfile, next_link, (byte*) info->buff,
myisam_block_size, block_size, 1)))
DBUG_RETURN(1);
next_link=mi_sizekorr(buff);
@@ -1228,6 +1228,8 @@ err:
VOID(my_raid_delete(param->temp_filename,info->s->base.raid_chunks,
MYF(MY_WME)));
}
+ mi_mark_crashed_on_repair(info);
+ info->update|= HA_STATE_CHANGED;
}
if (sort_info->record)
my_free(sort_info->record,MYF(0));
@@ -1580,7 +1582,7 @@ err:
DBUG_RETURN(1);
}
- /* Fix table using sorting */
+ /* Fix table or given index using sorting */
/* saves new table in temp_filename */
int mi_repair_by_sort(MI_CHECK *param, register MI_INFO *info,
@@ -1597,6 +1599,7 @@ int mi_repair_by_sort(MI_CHECK *param, register MI_INFO *info,
ulong *rec_per_key_part;
char llbuff[22];
SORT_INFO *sort_info= &param->sort_info;
+ ulonglong key_map=share->state.key_map;
DBUG_ENTER("rep_by_sort");
start_records=info->state->records;
@@ -1621,7 +1624,7 @@ int mi_repair_by_sort(MI_CHECK *param, register MI_INFO *info,
init_io_cache(&info->rec_cache,info->dfile,
(uint) param->write_buffer_length,
WRITE_CACHE,new_header_length,1,
- MYF(MY_WME | MY_WAIT_IF_FULL))))
+ MYF(MY_WME | MY_WAIT_IF_FULL) & param->myf_rw)))
goto err;
sort_info->key_block_end=sort_info->key_block+param->sort_key_blocks;
info->opt_flag|=WRITE_CACHE_USED;
@@ -1664,10 +1667,15 @@ int mi_repair_by_sort(MI_CHECK *param, register MI_INFO *info,
}
info->update= (short) (HA_STATE_CHANGED | HA_STATE_ROW_CHANGED);
- for (i=0 ; i < share->base.keys ; i++)
- share->state.key_root[i]= HA_OFFSET_ERROR;
- for (i=0 ; i < share->state.header.max_block_size ; i++)
- share->state.key_del[i]= HA_OFFSET_ERROR;
+ if (!(param->testflag & T_CREATE_MISSING_KEYS))
+ {
+ for (i=0 ; i < share->base.keys ; i++)
+ share->state.key_root[i]= HA_OFFSET_ERROR;
+ for (i=0 ; i < share->state.header.max_block_size ; i++)
+ share->state.key_del[i]= HA_OFFSET_ERROR;
+ }
+ else
+ key_map= ~key_map; /* Create the missing keys */
info->state->key_file_length=share->base.keystart;
@@ -1696,7 +1704,8 @@ int mi_repair_by_sort(MI_CHECK *param, register MI_INFO *info,
else
length=share->base.pack_reclength;
sort_param.max_records=sort_info->max_records=
- (ha_rows) (sort_info->filelength/length+1);
+ ((param->testflag & T_TRUST_HEADER) ? info->state->records :
+ (ha_rows) (sort_info->filelength/length+1));
sort_param.key_cmp=sort_key_cmp;
sort_param.key_write=sort_key_write;
sort_param.key_read=sort_key_read;
@@ -1714,7 +1723,7 @@ int mi_repair_by_sort(MI_CHECK *param, register MI_INFO *info,
rec_per_key_part+=sort_info->keyinfo->keysegs, sort_info->key++)
{
sort_info->keyinfo=share->keyinfo+sort_info->key;
- if (!(((ulonglong) 1 << sort_info->key) & share->state.key_map))
+ if (!(((ulonglong) 1 << sort_info->key) & key_map))
continue;
if ((!(param->testflag & T_SILENT)))
@@ -1755,6 +1764,15 @@ int mi_repair_by_sort(MI_CHECK *param, register MI_INFO *info,
param->read_cache.end_of_file=sort_info->filepos;
if (write_data_suffix(param,info) || end_io_cache(&info->rec_cache))
goto err;
+ if (param->testflag & T_SAFE_REPAIR)
+ {
+    /* Don't repair if we lost more than one row */
+ if (info->state->records+1 < start_records)
+ {
+ info->state->records=start_records;
+ goto err;
+ }
+ }
share->state.state.data_file_length = info->state->data_file_length
= sort_info->filepos;
/* Only whole records */
@@ -1837,6 +1855,8 @@ err:
VOID(my_raid_delete(param->temp_filename,info->s->base.raid_chunks,
MYF(MY_WME)));
}
+ mi_mark_crashed_on_repair(info);
+ info->update|= HA_STATE_CHANGED;
}
my_free((gptr) sort_info->key_block,MYF(MY_ALLOW_ZERO_PTR));
my_free(sort_info->record,MYF(MY_ALLOW_ZERO_PTR));
@@ -1844,7 +1864,7 @@ err:
VOID(end_io_cache(&param->read_cache));
VOID(end_io_cache(&info->rec_cache));
info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED);
- if (!got_error && param->testflag & T_UNPACK)
+ if (!got_error && (param->testflag & T_UNPACK))
{
share->state.header.options[0]&= (uchar) ~HA_OPTION_COMPRESS_RECORD;
share->pack.header_length=0;
@@ -2884,3 +2904,58 @@ ha_checksum mi_byte_checksum(const byte *buf, uint length)
test(crc & (((ha_checksum) 1) << (8*sizeof(ha_checksum)-1)));
return crc;
}
+
+/*
+  Deactivate all non-unique indexes that can be recreated fast.
+  These include packed keys for which sorting would use more temporary
+  space than the maximum allowed file length, or for which the unpacked
+  keys would take much more space than the packed keys.
+ Note that 'rows' may be zero for the case when we don't know how many
+ rows we will put into the file.
+ */
+
+static my_bool mi_too_big_key_for_sort(MI_KEYDEF *key, ha_rows rows)
+{
+ return (key->flag & (HA_BINARY_PACK_KEY | HA_VAR_LENGTH_KEY | HA_FULLTEXT) &&
+ ((ulonglong) rows * key->maxlength > MAX_FILE_SIZE ||
+ (ulonglong) rows * (key->maxlength - key->minlength) / 2 >
+ MI_MAX_TEMP_LENGTH ||
+ (rows == 0 && (key->maxlength / key->minlength) > 2)));
+}
+
+
+void mi_dectivate_non_unique_index(MI_INFO *info, ha_rows rows)
+{
+ MYISAM_SHARE *share=info->s;
+ uint i;
+ if (!info->state->records) /* Don't do this if old rows */
+ {
+ MI_KEYDEF *key=share->keyinfo;
+ for (i=0 ; i < share->base.keys ; i++,key++)
+ {
+ if (!(key->flag & HA_NOSAME) && ! mi_too_big_key_for_sort(key,rows))
+ {
+ share->state.key_map&= ~ ((ulonglong) 1 << i);
+ info->update|= HA_STATE_CHANGED;
+ }
+ }
+ }
+}
+
+
+/* Return TRUE if we can use repair by sorting */
+
+my_bool mi_test_if_sort_rep(MI_INFO *info, ha_rows rows)
+{
+ MYISAM_SHARE *share=info->s;
+ uint i;
+ MI_KEYDEF *key=share->keyinfo;
+ if (!share->state.key_map)
+ return FALSE; /* Can't use sort */
+ for (i=0 ; i < share->base.keys ; i++,key++)
+ {
+ if (mi_too_big_key_for_sort(key,rows))
+ return FALSE;
+ }
+ return TRUE;
+}
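(Editorial worked example, not part of the patch.) To see the heuristic above reject a key, take made-up numbers: a packed, variable-length key with maxlength 100 and minlength 20 over 50 million rows needs roughly 50,000,000 * (100 - 20) / 2 = 2,000,000,000 bytes of temporary sort space, which is above MI_MAX_TEMP_LENGTH (1 GB), so mi_too_big_key_for_sort() returns true and that key is skipped by the repair-by-sort pass. The fragment assumes my_base.h and myisam.h from the hunks above are in scope; the variable names are made up.

    ha_rows   rows=       (ha_rows) 50000000;      /* made-up row count   */
    ulonglong max_length= 100, min_length= 20;     /* made-up key lengths */
    my_bool too_big=
      ((ulonglong) rows * max_length > MAX_FILE_SIZE ||   /* 5e9; only hits
                                                             with 4-byte off_t */
       (ulonglong) rows * (max_length - min_length) / 2 >
       MI_MAX_TEMP_LENGTH);                        /* 2e9 > 1 GB -> TRUE  */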
diff --git a/myisam/mi_locking.c b/myisam/mi_locking.c
index 2ef62c23430..24a23bda7e2 100644
--- a/myisam/mi_locking.c
+++ b/myisam/mi_locking.c
@@ -74,12 +74,12 @@ int mi_lock_database(MI_INFO *info, int lock_type)
share->state.process= share->last_process=share->this_process;
share->state.unique= info->last_unique= info->this_unique;
#ifndef HAVE_PREAD
- pthread_mutex_lock(&THR_LOCK_keycache); // QQ; Has to be removed!
+ pthread_mutex_lock(&THR_LOCK_keycache); /* QQ; Has to be removed! */
#endif
if (mi_state_info_write(share->kfile, &share->state, 1))
error=my_errno;
#ifndef HAVE_PREAD
- pthread_mutex_unlock(&THR_LOCK_keycache);// QQ; Has to be removed!
+ pthread_mutex_unlock(&THR_LOCK_keycache);/* QQ; Has to be removed! */
#endif
share->changed=0;
if (myisam_flush)
diff --git a/myisam/myisamdef.h b/myisam/myisamdef.h
index 13bb2e7efad..e1263c89ff5 100644
--- a/myisam/myisamdef.h
+++ b/myisam/myisamdef.h
@@ -284,7 +284,9 @@ struct st_myisam_info {
mi_int2store(x,boh); }
#define mi_test_if_nod(x) (x[0] & 128 ? info->s->base.key_reflength : 0)
#define mi_mark_crashed(x) (x)->s->state.changed|=2
+#define mi_mark_crashed_on_repair(x) (x)->s->state.changed|=4+2
#define mi_is_crashed(x) ((x)->s->state.changed & 2)
+#define mi_is_crashed_on_repair(x) ((x)->s->state.changed & 4)
/* Functions to store length of space packed keys, VARCHAR or BLOB keys */
@@ -606,6 +608,7 @@ void mi_get_status(void* param);
void mi_update_status(void* param);
void mi_copy_status(void* to,void *from);
my_bool mi_check_status(void* param);
+void mi_dectivate_non_unique_index(MI_INFO *info, ha_rows rows);
/* Functions needed by mi_check */
#ifdef __cplusplus
diff --git a/myisam/myisampack.c b/myisam/myisampack.c
index 3421f77c8d8..23d7f494994 100644
--- a/myisam/myisampack.c
+++ b/myisam/myisampack.c
@@ -249,7 +249,7 @@ static struct option long_options[] =
static void print_version(void)
{
- printf("%s Ver 1.7 for %s on %s\n",my_progname,SYSTEM_TYPE,MACHINE_TYPE);
+ printf("%s Ver 1.8 for %s on %s\n",my_progname,SYSTEM_TYPE,MACHINE_TYPE);
}
static void usage(void)
diff --git a/myisammrg/myrg_rrnd.c b/myisammrg/myrg_rrnd.c
index 93c7282623d..da11b230f27 100644
--- a/myisammrg/myrg_rrnd.c
+++ b/myisammrg/myrg_rrnd.c
@@ -1,15 +1,15 @@
/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
-
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
-
+
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
+
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
diff --git a/mysql.proj b/mysql.proj
index 85a0fca7313..820b81dbc90 100644
--- a/mysql.proj
+++ b/mysql.proj
Binary files differ
diff --git a/mysys/charset.c b/mysys/charset.c
index 88b0972431e..bf51184589c 100644
--- a/mysys/charset.c
+++ b/mysys/charset.c
@@ -112,7 +112,7 @@ static my_bool read_charset_index(TYPELIB *charsets, myf myflags)
while (!get_word(&fb, buf))
{
uint length;
- if (!(s= (char*) my_once_alloc(length=strlen(buf)+1, myflags)))
+ if (!(s= (char*) my_once_alloc(length= (uint) strlen(buf)+1, myflags)))
{
my_fclose(fb.f,myflags);
return TRUE;
@@ -292,7 +292,7 @@ static CHARSET_INFO *add_charset(uint cs_number, const char *cs_name)
cs = (CHARSET_INFO*) my_once_alloc(sizeof(CHARSET_INFO),
MYF(MY_WME));
*cs=tmp_cs;
- cs->name = (char *) my_once_alloc(strlen(cs_name) + 1, MYF(MY_WME));
+ cs->name = (char *) my_once_alloc((uint) strlen(cs_name)+1, MYF(MY_WME));
cs->ctype = (uchar*) my_once_alloc(CTYPE_TABLE_SIZE, MYF(MY_WME));
cs->to_lower = (uchar*) my_once_alloc(TO_LOWER_TABLE_SIZE, MYF(MY_WME));
cs->to_upper = (uchar*) my_once_alloc(TO_UPPER_TABLE_SIZE, MYF(MY_WME));
@@ -410,7 +410,7 @@ my_bool set_default_charset_by_name(const char *cs_name, myf flags)
static my_bool charset_in_string(const char *name, DYNAMIC_STRING *s)
{
- uint length=strlen(name);
+ uint length= (uint) strlen(name);
const char *pos;
for (pos=s->str ; (pos=strstr(pos,name)) ; pos++)
{
diff --git a/mysys/mf_casecnv.c b/mysys/mf_casecnv.c
index 705e1361937..1d63527eb7b 100644
--- a/mysys/mf_casecnv.c
+++ b/mysys/mf_casecnv.c
@@ -32,7 +32,7 @@ void caseup_str(my_string str)
{
#ifdef USE_MB
register uint32 l;
- register char *end=str+strlen(str);
+ register char *end=str+(uint) strlen(str);
if (use_mb(default_charset_info))
while (*str)
{
@@ -51,7 +51,7 @@ void casedn_str(my_string str)
{
#ifdef USE_MB
register uint32 l;
- register char *end=str+strlen(str);
+ register char *end=str+(uint) strlen(str);
if (use_mb(default_charset_info))
while (*str)
{
@@ -144,7 +144,7 @@ int my_strcasecmp(const char *s, const char *t)
{
#ifdef USE_MB
register uint32 l;
- register const char *end=s+strlen(s);
+ register const char *end=s+(uint) strlen(s);
if (use_mb(default_charset_info))
{
while (s<end)
diff --git a/mysys/mf_pack.c b/mysys/mf_pack.c
index 8aff6a3484a..bfa9e5d8711 100644
--- a/mysys/mf_pack.c
+++ b/mysys/mf_pack.c
@@ -53,13 +53,13 @@ void pack_dirname(my_string to, const char *from)
LINT_INIT(buff_length);
if (!(cwd_err= my_getwd(buff,FN_REFLEN,MYF(0))))
{
- buff_length=strlen(buff);
+ buff_length= (uint) strlen(buff);
d_length=(uint) (start-to);
if ((start == to ||
(buff_length == d_length && !bcmp(buff,start,d_length))) &&
*start != FN_LIBCHAR && *start)
{ /* Put current dir before */
- bchange(to,d_length,buff,buff_length,strlen(to)+1);
+ bchange(to,d_length,buff,buff_length,(uint) strlen(to)+1);
}
}
@@ -68,7 +68,7 @@ void pack_dirname(my_string to, const char *from)
length=0;
if (home_dir)
{
- length=strlen(home_dir);
+ length= (uint) strlen(home_dir);
if (home_dir[length-1] == FN_LIBCHAR)
length--; /* Don't test last '/' */
}
@@ -92,7 +92,7 @@ void pack_dirname(my_string to, const char *from)
}
if (is_prefix(to,buff))
{
- length=strlen(buff);
+ length= (uint) strlen(buff);
if (to[length])
(void) strmov_overlapp(to,to+length); /* Remove everything before */
else
@@ -265,7 +265,7 @@ uint unpack_dirname(my_string to, const char *from)
DBUG_ENTER("unpack_dirname");
(void) intern_filename(buff,from); /* Change to intern name */
- length=strlen(buff); /* Fix that '/' is last */
+ length= (uint) strlen(buff); /* Fix that '/' is last */
if (length &&
#ifdef FN_DEVCHAR
buff[length-1] != FN_DEVCHAR &&
@@ -283,7 +283,7 @@ uint unpack_dirname(my_string to, const char *from)
if (tilde_expansion)
{
length-=(uint) (suffix-buff)-1;
- if (length+(h_length=strlen(tilde_expansion)) <= FN_REFLEN)
+ if (length+(h_length= (uint) strlen(tilde_expansion)) <= FN_REFLEN)
{
if (tilde_expansion[h_length-1] == FN_LIBCHAR)
h_length--;
diff --git a/mysys/mf_path.c b/mysys/mf_path.c
index 0a0a760cea1..f7dcf34e6b4 100644
--- a/mysys/mf_path.c
+++ b/mysys/mf_path.c
@@ -46,7 +46,7 @@ my_string my_path(my_string to, const char *progname,
if (!test_if_hard_path(to))
{
if (!my_getwd(curr_dir,FN_REFLEN,MYF(0)))
- bchange(to,0,curr_dir,strlen(curr_dir),strlen(to)+1);
+ bchange(to,0,curr_dir, (uint) strlen(curr_dir), (uint) strlen(to)+1);
}
}
else
diff --git a/mysys/mf_wfile.c b/mysys/mf_wfile.c
index 02e155d9936..87d1392250a 100644
--- a/mysys/mf_wfile.c
+++ b/mysys/mf_wfile.c
@@ -63,7 +63,8 @@ WF_PACK *wf_comp(my_string str)
#endif
if ((ret= (WF_PACK*) my_malloc((uint) ant*(sizeof(my_string*)+2)+
- sizeof(WF_PACK)+strlen(str)+1,MYF(MY_WME)))
+ sizeof(WF_PACK)+ (uint) strlen(str)+1,
+ MYF(MY_WME)))
== 0)
DBUG_RETURN((WF_PACK *) NULL);
ret->wild= (my_string*) (ret+1);
diff --git a/mysys/my_alloc.c b/mysys/my_alloc.c
index 693ffbfab78..45fd2516683 100644
--- a/mysys/my_alloc.c
+++ b/mysys/my_alloc.c
@@ -112,7 +112,7 @@ void free_root(MEM_ROOT *root)
char *strdup_root(MEM_ROOT *root,const char *str)
{
- uint len=strlen(str)+1;
+ uint len= (uint) strlen(str)+1;
char *pos;
if ((pos=alloc_root(root,len)))
memcpy(pos,str,len);
diff --git a/mysys/my_lib.c b/mysys/my_lib.c
index b8554b08d28..0f4a5261fba 100644
--- a/mysys/my_lib.c
+++ b/mysys/my_lib.c
@@ -353,7 +353,11 @@ myf MyFlags;
ushort mode;
char tmp_path[FN_REFLEN],*tmp_file,attrib;
my_ptrdiff_t diff;
+#ifdef _WIN64
+ __int64 handle;
+#else
long handle;
+#endif
DBUG_ENTER("my_dir");
DBUG_PRINT("my",("path: '%s' stat: %d MyFlags: %d",path,MyFlags));
diff --git a/mysys/my_lread.c b/mysys/my_lread.c
index c3b8a6704c3..94ed258151e 100644
--- a/mysys/my_lread.c
+++ b/mysys/my_lread.c
@@ -32,7 +32,7 @@ uint32 my_lread(int Filedes, byte *Buffer, uint32 Count, myf MyFlags)
Filedes, Buffer, Count, MyFlags));
/* Temp hack to get count to int32 while read wants int */
- if ((readbytes = (uint32) read(Filedes, Buffer, (size_t) Count)) != Count)
+ if ((readbytes = (uint32) read(Filedes, Buffer, (uint) Count)) != Count)
{
my_errno=errno;
if (MyFlags & (MY_WME | MY_FAE | MY_FNABP))
diff --git a/mysys/my_lwrite.c b/mysys/my_lwrite.c
index 201c36f619c..734916173ce 100644
--- a/mysys/my_lwrite.c
+++ b/mysys/my_lwrite.c
@@ -28,7 +28,7 @@ uint32 my_lwrite(int Filedes, const byte *Buffer, uint32 Count, myf MyFlags)
Filedes, Buffer, Count, MyFlags));
/* Temp hack to get count to int32 while write wants int */
- if ((writenbytes = (uint32) write(Filedes, Buffer, (size_t) Count)) != Count)
+ if ((writenbytes = (uint32) write(Filedes, Buffer, (uint) Count)) != Count)
{
my_errno=errno;
if (writenbytes == (uint32) -1 || MyFlags & (MY_NABP | MY_FNABP))
diff --git a/mysys/my_winthread.c b/mysys/my_winthread.c
index 436fc954d93..7a1e1365325 100644
--- a/mysys/my_winthread.c
+++ b/mysys/my_winthread.c
@@ -83,11 +83,11 @@ int pthread_create(pthread_t *thread_id, pthread_attr_t *attr,
*thread_id=map->pthreadself=hThread;
pthread_mutex_unlock(&THR_LOCK_thread);
- if ((long) hThread == -1L)
+ if (hThread == (HANDLE) -1)
{
- long error=errno;
+ int error=errno;
DBUG_PRINT("error",
- ("Can't create thread to handle request (error %ld)",error));
+ ("Can't create thread to handle request (error %d)",error));
DBUG_RETURN(error ? error : -1);
}
VOID(SetThreadPriority(hThread, attr->priority)) ;
diff --git a/mysys/string.c b/mysys/string.c
index f7e265a43e6..0696c72b922 100644
--- a/mysys/string.c
+++ b/mysys/string.c
@@ -33,7 +33,7 @@ my_bool init_dynamic_string(DYNAMIC_STRING *str, const char *init_str,
if (!alloc_increment)
alloc_increment=128;
length=1;
- if (init_str && (length=strlen(init_str)+1) < init_alloc)
+ if (init_str && (length= (uint) strlen(init_str)+1) < init_alloc)
init_alloc=((length+alloc_increment-1)/alloc_increment)*alloc_increment;
if (!init_alloc)
init_alloc=alloc_increment;
@@ -53,7 +53,7 @@ my_bool dynstr_set(DYNAMIC_STRING *str, const char *init_str)
uint length;
DBUG_ENTER("dynstr_set");
- if (init_str && (length=strlen(init_str)+1) > str->max_length)
+ if (init_str && (length= (uint) strlen(init_str)+1) > str->max_length)
{
str->max_length=((length+str->alloc_increment-1)/str->alloc_increment)*
str->alloc_increment;
diff --git a/mysys/thr_alarm.c b/mysys/thr_alarm.c
index a681c1b110d..de8d3718c11 100644
--- a/mysys/thr_alarm.c
+++ b/mysys/thr_alarm.c
@@ -831,7 +831,7 @@ bool thr_alarm(thr_alarm_t *alrm, uint sec, ALARM *alarm)
alrm->crono=0;
return 1;
}
- if (!(alrm->crono=SetTimer(NULL,0,(long) sec*1000L, (TIMERPROC) NULL)))
+ if (!(alrm->crono=SetTimer((HWND) NULL,0, sec*1000,(TIMERPROC) NULL)))
return 1;
return 0;
}
diff --git a/mysys/thr_rwlock.c b/mysys/thr_rwlock.c
index fc2eea551da..37630956e7f 100644
--- a/mysys/thr_rwlock.c
+++ b/mysys/thr_rwlock.c
@@ -87,7 +87,7 @@ int my_rw_rdlock( rw_lock_t *rwp ) {
pthread_mutex_lock(&rwp->lock);
/* active or queued writers */
- while ( ( rwp->state < 0 ) && rwp->waiters )
+ while ( ( rwp->state < 0 ) || rwp->waiters )
pthread_cond_wait( &rwp->readers, &rwp->lock);
rwp->state++;
@@ -103,12 +103,8 @@ int my_rw_wrlock( rw_lock_t *rwp ) {
while ( rwp->state )
pthread_cond_wait( &rwp->writers, &rwp->lock);
-
rwp->state = -1;
-
- if ( ( --rwp->waiters == 0 ) && ( rwp->state >= 0 ) )
- pthread_cond_broadcast( &rwp->readers );
-
+ --rwp->waiters;
pthread_mutex_unlock( &rwp->lock );
return( 0 );
diff --git a/regex/regcomp.c b/regex/regcomp.c
index 5990b121f9b..885c6a7cbc8 100644
--- a/regex/regcomp.c
+++ b/regex/regcomp.c
@@ -132,7 +132,7 @@ int cflags;
(NC-1)*sizeof(cat_t));
if (g == NULL)
return(REG_ESPACE);
- p->ssize = len/(size_t)2*(size_t)3 + (size_t)1; /* ugh */
+ p->ssize = (long) (len/(size_t)2*(size_t)3 + (size_t)1); /* ugh */
p->strip = (sop *)malloc(p->ssize * sizeof(sop));
p->slen = 0;
if (p->strip == NULL) {
@@ -268,7 +268,7 @@ register struct parse *p;
case '(':
REQUIRE(MORE(), REG_EPAREN);
p->g->nsub++;
- subno = p->g->nsub;
+ subno = (sopno) p->g->nsub;
if (subno < NPAREN)
p->pbegin[subno] = HERE();
EMIT(OLPAREN, subno);
@@ -488,7 +488,7 @@ int starordinary; /* is a leading * an ordinary character? */
break;
case BACKSL|'(':
p->g->nsub++;
- subno = p->g->nsub;
+ subno = (sopno) p->g->nsub;
if (subno < NPAREN)
p->pbegin[subno] = HERE();
EMIT(OLPAREN, subno);
@@ -811,8 +811,11 @@ int endc; /* name ended by endc,']' */
{
register char *sp = p->next;
register struct cname *cp;
+#ifdef _WIN64
+ register __int64 len;
+#else
register int len;
-
+#endif
while (MORE() && !SEETWO(endc, ']'))
NEXT();
if (!MORE()) {
@@ -1076,9 +1079,9 @@ register cset *cs;
register size_t css = (size_t)p->g->csetsize;
for (i = 0; i < css; i++)
- CHsub(cs, i);
+ CHsub(cs, i);
if (cs == top-1) /* recover only the easy case */
- p->g->ncsets--;
+ p->g->ncsets--;
}
/*
diff --git a/regex/regex.h b/regex/regex.h
index fb2cd29ea46..99a0077251e 100644
--- a/regex/regex.h
+++ b/regex/regex.h
@@ -6,7 +6,11 @@ extern "C" {
#endif
/* === regex2.h === */
+#ifdef _WIN64
+typedef __int64 regoff_t;
+#else
typedef off_t regoff_t;
+#endif
struct re_guts; /* none of your business :-) */
typedef struct {
int re_magic;
diff --git a/regex/regex2.h b/regex/regex2.h
index 2023716eb4a..0d94baa310f 100644
--- a/regex/regex2.h
+++ b/regex/regex2.h
@@ -95,8 +95,8 @@ typedef struct {
char *multis; /* -> char[smulti] ab\0cd\0ef\0\0 */
} cset;
/* note that CHadd and CHsub are unsafe, and CHIN doesn't yield 0/1 */
-#define CHadd(cs, c) ((cs)->ptr[(uch)(c)] |= (cs)->mask, (cs)->hash += (c))
-#define CHsub(cs, c) ((cs)->ptr[(uch)(c)] &= ~(cs)->mask, (cs)->hash -= (c))
+#define CHadd(cs, c) ((cs)->ptr[(uch)(c)] |= (cs)->mask, (cs)->hash += (uch) (c))
+#define CHsub(cs, c) ((cs)->ptr[(uch)(c)] &= ~(cs)->mask, (cs)->hash -= (uch)(c))
#define CHIN(cs, c) ((cs)->ptr[(uch)(c)] & (cs)->mask)
#define MCadd(p, cs, cp) mcadd(p, cs, cp) /* regcomp() internal fns */
#define MCsub(p, cs, cp) mcsub(p, cs, cp)
diff --git a/scripts/mysqlhotcopy.sh b/scripts/mysqlhotcopy.sh
index 85a92c6ea88..bc150ff61a5 100755
--- a/scripts/mysqlhotcopy.sh
+++ b/scripts/mysqlhotcopy.sh
@@ -25,7 +25,7 @@ WARNING: THIS IS VERY MUCH A FIRST-CUT ALPHA. Comments/patches welcome.
# Documentation continued at end of file
-my $VERSION = sprintf("%d.%02d", q$Revision$ =~ /(\d+)\.(\d+)/o);
+my $VERSION = "1.5";
my $OPTIONS = <<"_OPTIONS";
diff --git a/sql-bench/bench-init.pl.sh b/sql-bench/bench-init.pl.sh
index aca915f2914..b6552ccb48f 100755
--- a/sql-bench/bench-init.pl.sh
+++ b/sql-bench/bench-init.pl.sh
@@ -395,6 +395,11 @@ All benchmarks takes the following options:
extra information that 'uname -a' doesn\'t give and if the database was
stared with some specific, non default, options.
+--cmp=server[,server...]
+ Run the test with limits from the given servers. If you run all servers
+ with the same --cmp, you will get a test that is comparable between
+ the different sql servers.
+
--database (Default $opt_database)
In which database the test tables are created.
diff --git a/sql-bench/server-cfg.sh b/sql-bench/server-cfg.sh
index f4d16b908b7..a074efda4d9 100755
--- a/sql-bench/server-cfg.sh
+++ b/sql-bench/server-cfg.sh
@@ -535,23 +535,23 @@ sub new
$limits{'group_functions'} = 1;
$limits{'group_distinct_functions'}= 1; # Have count(distinct)
$limits{'having_with_alias'} = 0;
- $limits{'having_with_group'} = 0;
+ $limits{'having_with_group'} = 1;
$limits{'left_outer_join'} = 0;
$limits{'like_with_column'} = 1;
$limits{'lock_tables'} = 0; # in ATIS gives this a problem
- $limits{'max_column_name'} = 32; # Is this true
- $limits{'max_columns'} = 300; # 500 crashes pg 6.3
- $limits{'max_tables'} = 65000; # Should be big enough
- $limits{'max_conditions'} = 9; # This makes Pg real slow
- $limits{'max_index'} = 7; # Is this true ?
- $limits{'max_index_parts'} = 16; # Is this true ?
- $limits{'max_text_size'} = 7000; # 8000 crashes pg 6.3
$limits{'multi_drop'} = 1;
$limits{'order_by_position'} = 1;
- $limits{'query_size'} = 8191;
$limits{'select_without_from'}= 1;
$limits{'subqueries'} = 1;
$limits{'table_wildcard'} = 1;
+ $limits{'max_column_name'} = 32; # Is this true
+ $limits{'max_columns'} = 1000; # 500 crashes pg 6.3
+ $limits{'max_tables'} = 65000; # Should be big enough
+ $limits{'max_conditions'} = 30; # This makes Pg real slow
+ $limits{'max_index'} = 64; # Is this true ?
+ $limits{'max_index_parts'} = 16; # Is this true ?
+ $limits{'max_text_size'} = 7000; # 8000 crashes pg 6.3
+ $limits{'query_size'} = 8191;
# the different cases per query ...
$smds{'q1'} = 'b'; # with time
diff --git a/sql/ChangeLog b/sql/ChangeLog
index 81ce83aa243..65a5bca0bf9 100644
--- a/sql/ChangeLog
+++ b/sql/ChangeLog
@@ -1,3 +1,9 @@
+2000-08-08 Michael Widenius <monty@mysql.com>
+
+* Changed ALTER TABLE and LOAD DATA INFILE to create non-unique, small keys
+ after all rows are inserted.
+* Fixed use of UDF function with const arguments in WHERE clause.
+
2000-07-11 Michael Widenius <monty@mysql.com>
* Extended safe_mysqld; Patch by Christian Hammers
diff --git a/sql/field.cc b/sql/field.cc
index c903ea456a5..9018021cb93 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -2418,6 +2418,7 @@ bool Field_timestamp::get_date(TIME *ltime,
ltime->second= start->tm_sec;
ltime->second_part= 0;
ltime->neg= 0;
+ ltime->time_type=TIMESTAMP_FULL;
}
return 0;
}
@@ -3005,6 +3006,7 @@ bool Field_newdate::get_date(TIME *ltime,bool fuzzydate)
ltime->day= tmp & 31;
ltime->month= (tmp >> 5) & 15;
ltime->year= (tmp >> 9);
+ ltime->time_type=TIMESTAMP_DATE;
return (!fuzzydate && (!ltime->month || !ltime->day) && ltime->year) ? 1 : 0;
}
@@ -3181,6 +3183,7 @@ bool Field_datetime::get_date(TIME *ltime,bool fuzzydate)
part1=(long) (tmp/LL(1000000));
part2=(long) (tmp - (ulonglong) part1*LL(1000000));
+ ltime->time_type= TIMESTAMP_FULL;
ltime->neg=0;
ltime->second_part=0;
ltime->second= part2%100;
diff --git a/sql/filesort.cc b/sql/filesort.cc
index 44b7a7ab42e..ae07bec3323 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -192,7 +192,7 @@ ha_rows filesort(TABLE **table, SORT_FIELD *sortorder, uint s_length,
memavl=MIN_SORT_MEMORY;
}
param.keys--;
- maxbuffer+=10; /* Some extra range */
+ maxbuffer+=10; /* Some extra range */
if (memavl < MIN_SORT_MEMORY)
{
@@ -209,7 +209,7 @@ ha_rows filesort(TABLE **table, SORT_FIELD *sortorder, uint s_length,
&tempfile, selected_records_file)) ==
HA_POS_ERROR)
goto err;
- if (maxbuffer == 0)
+ if (maxbuffer == 0) // The whole set is in memory
{
if (save_index(&param,sort_keys,(uint) records))
goto err;
diff --git a/sql/gen_lex_hash.cc b/sql/gen_lex_hash.cc
index 3e1f6f15a6b..d3504b36d44 100644
--- a/sql/gen_lex_hash.cc
+++ b/sql/gen_lex_hash.cc
@@ -27,6 +27,7 @@
#include "lex.h"
bool opt_search=0,opt_verbose=0;
+ulong opt_count=100000;
#define max_allowed_array 8000 // Don't generate bigger arrays than this
#define max_symbol 32767 // Use this for 'not found'
@@ -316,6 +317,7 @@ void print_arrays()
static struct option long_options[] =
{
+ {"count", required_argument, 0, 'c'},
{"search", no_argument, 0, 'S'},
{"verbose", no_argument, 0, 'v'},
{"version", no_argument, 0, 'V'},
@@ -328,7 +330,7 @@ static struct option long_options[] =
static void usage(int version)
{
- printf("%s Ver 3.0 Distrib %s, for %s (%s)\n",
+ printf("%s Ver 3.1 Distrib %s, for %s (%s)\n",
my_progname, MYSQL_SERVER_VERSION, SYSTEM_TYPE, MACHINE_TYPE);
if (version)
return;
@@ -337,6 +339,7 @@ static void usage(int version)
puts("This program generates a perfect hashing function for the sql_lex.cc");
printf("Usage: %s [OPTIONS]\n", my_progname);
printf("\n\
+-c, --count=# Try count times to find an optimal hash table\n\
-r, --rnd1=# Set 1 part of rnd value for hash generator\n\
-R, --rnd2=# Set 2 part of rnd value for hash generator\n\
-t, --type=# Set type of char table to generate\n\
@@ -353,10 +356,13 @@ static int get_options(int argc, char **argv)
{
int c,option_index=0;
- while ((c=getopt_long(argc,argv,"?SvVr:R:t:",
+ while ((c=getopt_long(argc,argv,"?SvVc:r:R:t:",
long_options, &option_index)) != EOF)
{
switch(c) {
+ case 'c':
+ opt_count=atol(optarg);
+ break;
case 'r':
best_t1=atol(optarg);
break;
@@ -466,8 +472,7 @@ int main(int argc,char **argv)
int error;
MY_INIT(argv[0]);
- start_value=1277803L; best_t1=331678L; best_t2=4097229L; best_type=1;
- /* mode=5791 add=6 func_type: 0 */
+ start_value=6059524L; best_t1=2194873L; best_t2=4441039L; best_type=4; /* mode=4159 add=8 func_type: 0 */
if (get_options(argc,(char **) argv))
exit(1);
@@ -488,7 +493,7 @@ int main(int argc,char **argv)
start_value, best_t1,best_t2,best_type,best_mod,best_add,
best_functype);
- for (uint i=1 ; i <= 100000 ; i++)
+ for (uint i=1 ; i <= opt_count ; i++)
{
if (i % 10 == 0)
{
@@ -532,7 +537,9 @@ printf("/* This code is generated by gen_lex_hash.cc that seeks for a perfect\nh
print_arrays();
- printf("/* t1= %lu t2=%lu type= %d */\n\n",best_t1,best_t2,best_type);
+ printf("/* start_value=%ldL; best_t1=%ldL; best_t2=%ldL; best_type=%d; */ /* mode=%d add=%d type: %d */\n\n",
+ start_value, best_t1, best_t2,best_type,
+ best_mod, best_add, best_functype);
printf("inline SYMBOL *get_hash_symbol(const char *s,unsigned int length,bool function)\n\
{\n\
diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc
index f9cafd44839..5d76af832ae 100644
--- a/sql/ha_berkeley.cc
+++ b/sql/ha_berkeley.cc
@@ -228,7 +228,7 @@ berkeley_cmp_packed_key(const DBT *new_key, const DBT *saved_key)
key_length-=length;
saved_key_ptr+=key_part->field->packed_col_length(saved_key_ptr);
}
- return 0;
+ return key->handler.bdb_return_if_eq;
}
@@ -250,7 +250,7 @@ berkeley_cmp_fix_length_key(const DBT *new_key, const DBT *saved_key)
key_length-= key_part->length;
saved_key_ptr+=key_part->length;
}
- return 0;
+ return key->handler.bdb_return_if_eq;
}
@@ -964,6 +964,8 @@ int ha_berkeley::read_row(int error, char *buf, uint keynr, DBT *row,
}
+/* This is only used to read whole keys */
+
int ha_berkeley::index_read_idx(byte * buf, uint keynr, const byte * key,
uint key_len, enum ha_rkey_function find_flag)
{
@@ -982,14 +984,38 @@ int ha_berkeley::index_read(byte * buf, const byte * key,
uint key_len, enum ha_rkey_function find_flag)
{
DBT row;
+ int error;
DBUG_ENTER("index_read");
statistic_increment(ha_read_key_count,&LOCK_status);
bzero((char*) &row,sizeof(row));
- DBUG_RETURN(read_row(cursor->c_get(cursor,
- pack_key(&last_key, active_index,
- key_buff, key, key_len),
- &row, DB_SET),
- buf, active_index, &row, 0));
+ if (key_len == table->key_info[active_index].key_length)
+ {
+ error=read_row(cursor->c_get(cursor, pack_key(&last_key,
+ active_index,
+ key_buff,
+ key, key_len),
+ &row, DB_SET),
+ buf, active_index, &row, 0);
+ }
+ else
+ {
+ /* read of partial key */
+ pack_key(&last_key, active_index, key_buff, key, key_len);
+ /* Store for compare */
+ memcpy(key_buff2, key_buff, last_key.size);
+ ((KEY*) last_key.app_private)->handler.bdb_return_if_eq= -1;
+ error=read_row(cursor->c_get(cursor, &last_key, &row, DB_SET_RANGE),
+ buf, active_index, &row, 0);
+ ((KEY*) last_key.app_private)->handler.bdb_return_if_eq=0;
+ if (!error && find_flag == HA_READ_KEY_EXACT)
+ {
+      /* Verify that the key we positioned on really matches the
+         search key */
+ if (!error && ::key_cmp(table, key_buff2, active_index, key_len))
+ error=HA_ERR_KEY_NOT_FOUND;
+ }
+ }
+ DBUG_RETURN(error);
}
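(Editorial note, not part of the patch.) The partial-key path works because the two comparator hunks earlier in this file now return key->handler.bdb_return_if_eq instead of 0 when every compared key part is equal. With that field set to -1, any stored key that starts with the search prefix compares as larger than the prefix, so the DB_SET_RANGE get positions the cursor on the first such row; key_cmp() then rejects the row if an exact match was requested but nothing actually matched the prefix. Condensed from the hunk above, using its own local variables:

    ((KEY*) last_key.app_private)->handler.bdb_return_if_eq= -1;
    error= read_row(cursor->c_get(cursor, &last_key, &row, DB_SET_RANGE),
                    buf, active_index, &row, 0);
    ((KEY*) last_key.app_private)->handler.bdb_return_if_eq= 0;
    if (!error && find_flag == HA_READ_KEY_EXACT &&
        ::key_cmp(table, key_buff2, active_index, key_len))
      error= HA_ERR_KEY_NOT_FOUND;   /* found row does not match the prefix */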
diff --git a/sql/ha_berkeley.h b/sql/ha_berkeley.h
index 95851363686..9a8872b5df0 100644
--- a/sql/ha_berkeley.h
+++ b/sql/ha_berkeley.h
@@ -121,7 +121,7 @@ class ha_berkeley: public handler
enum ha_rkey_function end_search_flag);
int create(const char *name, register TABLE *form,
- HA_CREATE_INFO *create_info);
+ HA_CREATE_INFO *create_info);
int delete_table(const char *name);
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
enum thr_lock_type lock_type);
diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc
index 513a5affcb2..187cf703ce8 100644
--- a/sql/ha_myisam.cc
+++ b/sql/ha_myisam.cc
@@ -30,11 +30,12 @@
#include "../myisam/myisamdef.h"
#endif
-ulong myisam_sort_buffer_size;
#if !defined(HAVE_PREAD)
pthread_mutex_t THR_LOCK_keycache;
#endif
+ulong myisam_sort_buffer_size;
+
/*****************************************************************************
** MyISAM tables
*****************************************************************************/
@@ -57,7 +58,12 @@ static void mi_check_print_msg(MI_CHECK *param, const char* msg_type,
sql_print_error(msgbuf);
return;
}
-
+ if (param->testflag & (T_CREATE_MISSING_KEYS | T_SAFE_REPAIR |
+ T_AUTO_REPAIR))
+ {
+ my_message(ER_NOT_KEYFILE,msgbuf,MYF(MY_WME));
+ return;
+ }
net_store_data(packet, param->table_name);
net_store_data(packet, param->op_name);
net_store_data(packet, msg_type);
@@ -328,29 +334,37 @@ int ha_myisam::analyze(THD *thd)
}
-int ha_myisam::repair(THD* thd, HA_CHECK_OPT* check_opt)
+int ha_myisam::repair(THD* thd, HA_CHECK_OPT *check_opt)
{
if (!file) return HA_CHECK_INTERNAL_ERROR;
int error ;
MI_CHECK param;
- MYISAM_SHARE* share = file->s;
- char fixed_name[FN_REFLEN];
-
+
myisamchk_init(&param);
param.thd = thd;
param.op_name = (char*) "repair";
- param.table_name = table->table_name;
- param.testflag = check_opt->flags | T_SILENT|T_FORCE_CREATE|T_REP_BY_SORT;
- param.sort_buffer_length= check_opt->sort_buffer_size;
+ param.testflag = (check_opt->flags | T_SILENT|T_FORCE_CREATE|T_REP_BY_SORT|
+ T_STATISTICS);
if (check_opt->quick)
param.opt_rep_quick++;
+ param.sort_buffer_length= check_opt->sort_buffer_size;
+ return repair(thd,param);
+}
+
+
+int ha_myisam::repair(THD *thd, MI_CHECK &param)
+{
+ int error;
+ char fixed_name[FN_REFLEN];
+ MYISAM_SHARE* share = file->s;
+
+ param.table_name = table->table_name;
param.tmpfile_createflag = O_RDWR | O_TRUNC;
param.using_global_keycache = 1;
VOID(fn_format(fixed_name,file->filename,"",MI_NAME_IEXT,
4+ (param.opt_follow_links ? 16 : 0)));
-
- if (share->state.key_map)
+ if (mi_test_if_sort_rep(file,file->state->records))
error = mi_repair_by_sort(&param, file, fixed_name, param.opt_rep_quick);
else
error= mi_repair(&param, file, fixed_name, param.opt_rep_quick);
@@ -397,6 +411,38 @@ int ha_myisam::repair(THD* thd, HA_CHECK_OPT* check_opt)
}
+/* Deactivate all non-unique indexes that can be recreated fast */
+
+void ha_myisam::deactivate_non_unique_index(ha_rows rows)
+{
+ if (!(specialflag & SPECIAL_SAFE_MODE))
+ mi_dectivate_non_unique_index(file,rows);
+}
+
+
+bool ha_myisam::activate_all_index(THD *thd)
+{
+ int error=0;
+ char fixed_name[FN_REFLEN];
+ MI_CHECK param;
+ MYISAM_SHARE* share = file->s;
+ DBUG_ENTER("activate_all_index");
+ if (share->state.key_map != ((ulonglong) 1L << share->base.keys)-1)
+ {
+ const char *save_proc_info=thd->proc_info;
+ thd->proc_info="creating index";
+ myisamchk_init(&param);
+ param.op_name = (char*) "recreating_index";
+ param.testflag = (T_SILENT | T_REP_BY_SORT |
+ T_STATISTICS | T_CREATE_MISSING_KEYS | T_TRUST_HEADER);
+ param.myf_rw&= ~MY_WAIT_IF_FULL;
+ param.sort_buffer_length= myisam_sort_buffer_size;
+ param.opt_rep_quick++;
+ error=repair(thd,param) != HA_CHECK_OK;
+ thd->proc_info=save_proc_info;
+ }
+ DBUG_RETURN(error);
+}
int ha_myisam::update_row(const byte * old_data, byte * new_data)
{
@@ -870,4 +916,3 @@ int ha_myisam::ft_read(byte * buf)
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
}
-
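(Editorial sketch, not part of the patch.) For reference, the key_map test in activate_all_index() above compares against the "all keys enabled" mask: bit i of key_map set means key i is active, and ((ulonglong) 1 << keys) - 1 is the mask with every key active. The fragment shows the arithmetic for a hypothetical table with 3 keys; the variable names are made up.

    ulonglong all_keys= ((ulonglong) 1L << 3) - 1;   /* 0x7: keys 0,1,2 */
    ulonglong key_map=  all_keys;
    key_map&= ~((ulonglong) 1 << 2);   /* disable key 2, as
                                          mi_dectivate_non_unique_index() does */
    if (key_map != all_keys)
    {
      /* this is the condition under which activate_all_index() starts a
         repair with T_CREATE_MISSING_KEYS to recreate the disabled keys */
    }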
diff --git a/sql/ha_myisam.h b/sql/ha_myisam.h
index a25711d720b..8e391f6e3c2 100644
--- a/sql/ha_myisam.h
+++ b/sql/ha_myisam.h
@@ -30,6 +30,7 @@ class ha_myisam: public handler
{
MI_INFO *file;
uint int_option_flag;
+ int repair(THD *thd, MI_CHECK &param);
public:
ha_myisam(TABLE *table): handler(table), file(0),
@@ -76,6 +77,8 @@ class ha_myisam: public handler
int reset(void);
int external_lock(THD *thd, int lock_type);
int delete_all_rows(void);
+ void deactivate_non_unique_index(ha_rows rows);
+ bool activate_all_index(THD *thd);
ha_rows records_in_range(int inx,
const byte *start_key,uint start_key_len,
enum ha_rkey_function start_search_flag,
diff --git a/sql/handler.h b/sql/handler.h
index a86b390e78b..208ae989218 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -261,6 +261,8 @@ public:
virtual int optimize(THD* thd);
virtual int analyze(THD* thd);
virtual int dump(THD* thd, int fd = -1) { return ER_DUMP_NOT_IMPLEMENTED; }
+ virtual void deactivate_non_unique_index(ha_rows rows) {}
+ virtual bool activate_all_index(THD *thd) {return 0;}
// not implemented by default
virtual int net_read_dump(NET* net)
{ return ER_DUMP_NOT_IMPLEMENTED; }
diff --git a/sql/item_func.cc b/sql/item_func.cc
index e59221eb0f5..90201597873 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -1040,9 +1040,13 @@ udf_handler::fix_fields(THD *thd,TABLE_LIST *tables,Item_result_field *func,
char buff[sizeof(double)]; // Max argument in function
DBUG_ENTER("Item_udf_func::fix_fields");
- if (thd && check_stack_overrun(thd,buff))
- return 0; // Fatal error flag is set!
-
+ if (thd)
+ {
+ if (check_stack_overrun(thd,buff))
+ return 0; // Fatal error flag is set!
+ }
+ else
+ thd=current_thd; // In WHERE / const clause
udf_func *tmp_udf=find_udf(u_d->name,strlen(u_d->name),1);
if (!tmp_udf)
@@ -1140,9 +1144,7 @@ udf_handler::fix_fields(THD *thd,TABLE_LIST *tables,Item_result_field *func,
break;
}
}
-
- if(thd)
- thd->net.last_error[0]=0;
+ thd->net.last_error[0]=0;
my_bool (*init)(UDF_INIT *, UDF_ARGS *, char *)=
(my_bool (*)(UDF_INIT *, UDF_ARGS *, char *))
u_d->func_init;
@@ -1588,6 +1590,8 @@ static user_var_entry *get_variable(HASH *hash, LEX_STRING &name,
bool Item_func_set_user_var::fix_fields(THD *thd,TABLE_LIST *tables)
{
+ if (!thd)
+ thd=current_thd;
if (Item_func::fix_fields(thd,tables) ||
!(entry= get_variable(&thd->user_vars, name, 1)))
return 1;
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index 90a6cc2910c..fcb68d07f61 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -431,6 +431,7 @@ void Item_func_curdate::fix_length_and_dec()
ltime.second= 0;
ltime.second_part=0;
ltime.neg=0;
+ ltime.time_type=TIMESTAMP_DATE;
}
bool Item_func_curdate::get_date(TIME *res,
@@ -487,6 +488,7 @@ void Item_func_now::fix_length_and_dec()
ltime.second= start->tm_sec;
ltime.second_part=0;
ltime.neg=0;
+ ltime.time_type=TIMESTAMP_FULL;
}
bool Item_func_now::get_date(TIME *res,
diff --git a/sql/mini_client.cc b/sql/mini_client.cc
index 49433bdf96a..c0e6b3c3cbe 100644
--- a/sql/mini_client.cc
+++ b/sql/mini_client.cc
@@ -231,7 +231,7 @@ static int mc_sock_connect(File s, const struct sockaddr *name, uint namelen, ui
return connect(s, (struct sockaddr*) name, namelen);
#else
int flags, res, s_err;
- size_socket s_err_size = sizeof(uint);
+ socklen_t s_err_size = sizeof(uint);
fd_set sfds;
struct timeval tv;
@@ -500,7 +500,7 @@ mc_mysql_connect(MYSQL *mysql,const char *host, const char *user,
UNIXaddr.sun_family = AF_UNIX;
strmov(UNIXaddr.sun_path, unix_socket);
if (mc_sock_connect(sock,(struct sockaddr *) &UNIXaddr, sizeof(UNIXaddr),
- mysql->options.connect_timeout) <0)
+ mysql->options.connect_timeout) <0)
{
DBUG_PRINT("error",("Got error %d on connect to local server",ERRNO));
net->last_errno=CR_CONNECTION_ERROR;
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index 0963b4dd03a..788502ca2a3 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -219,6 +219,7 @@ int quick_rm_table(enum db_type base,const char *db,
const char *table_name);
bool mysql_change_db(THD *thd,const char *name);
void mysql_parse(THD *thd,char *inBuf,uint length);
+void mysql_init_select(LEX *lex);
pthread_handler_decl(handle_one_connection,arg);
int handle_bootstrap(THD *thd,FILE *file);
sig_handler end_thread_signal(int sig);
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 179c7ecd9dc..ed3a5d38188 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -656,7 +656,7 @@ static void set_user(const char *user)
unireg_abort(1);
}
#ifdef HAVE_INITGROUPS
- initgroups(user,ent->pw_gid);
+ initgroups((char*) user,ent->pw_gid);
#endif
if (setgid(ent->pw_gid) == -1)
{
@@ -876,8 +876,8 @@ void end_thread(THD *thd, bool put_in_cache)
thread_count--;
delete thd;
- if (cached_thread_count < thread_cache_size && ! abort_loop &&
- !kill_cached_threads)
+ if (put_in_cache && cached_thread_count < thread_cache_size &&
+ ! abort_loop && !kill_cached_threads)
{
/* Don't kill the thread, just put it in cache for reuse */
DBUG_PRINT("info", ("Adding thread to cache"))
@@ -891,8 +891,9 @@ void end_thread(THD *thd, bool put_in_cache)
{
wake_thread--;
thd=thread_cache.get();
- threads.append(thd);
+ thd->real_id=pthread_self();
(void) thd->store_globals();
+ threads.append(thd);
pthread_mutex_unlock(&LOCK_thread_count);
DBUG_VOID_RETURN;
}
@@ -2229,7 +2230,7 @@ CHANGEABLE_VAR changeable_vars[] = {
{ "bdb_cache_size", (long*) &berkeley_cache_size, KEY_CACHE_SIZE,
20*1024, (long) ~0, 0, IO_SIZE},
#endif
- { "connect_timeout", (long*) &connect_timeout,CONNECT_TIMEOUT,1,65535,0,1},
+ { "connect_timeout", (long*) &connect_timeout,CONNECT_TIMEOUT,2,65535,0,1},
{ "delayed_insert_timeout",(long*) &delayed_insert_timeout,
DELAYED_WAIT_TIMEOUT,1,~0L,0,1},
{ "delayed_insert_limit",(long*) &delayed_insert_limit,
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index 6f7db9c5a48..8f6af5a811b 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -217,13 +217,16 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
table->time_stamp=0;
table->next_number_field=table->found_next_number_field;
VOID(table->file->extra(HA_EXTRA_WRITE_CACHE));
+ table->file->deactivate_non_unique_index((ha_rows) 0);
table->copy_blobs=1;
if (!field_term->length() && !enclosed->length())
error=read_fixed_length(thd,info,table,fields,read_info);
else
error=read_sep_field(thd,info,table,fields,read_info,*enclosed);
- if (table->file->extra(HA_EXTRA_NO_CACHE))
+ if (table->file->extra(HA_EXTRA_NO_CACHE) ||
+ table->file->activate_all_index((ha_rows) 0))
error=1; /* purecov: inspected */
+
table->time_stamp=save_time_stamp;
table->next_number_field=0;
if (thd->lock)
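(Editorial sketch, not part of the patch.) The hunk above is the whole point of the new handler hooks: drop the cheap non-unique keys before the bulk insert, stream the rows through the write cache, then rebuild the dropped keys in a single repair-by-sort pass. Condensed from the LOAD DATA code above:

    VOID(table->file->extra(HA_EXTRA_WRITE_CACHE));
    table->file->deactivate_non_unique_index((ha_rows) 0); /* row count unknown */
    /* ... read_fixed_length() / read_sep_field() insert every row ... */
    if (table->file->extra(HA_EXTRA_NO_CACHE) ||
        table->file->activate_all_index((ha_rows) 0))      /* rebuild by sort */
      error=1;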
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 0c7b0b8187f..3d903acbfbf 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -34,6 +34,9 @@ extern I_List<i_string> binlog_do_db, binlog_ignore_db;
extern int yyparse(void);
extern "C" pthread_mutex_t THR_LOCK_keycache;
+#ifdef SOLARIS
+extern "C" int gethostname(char *name, int namelen);
+#endif
static bool check_table_access(THD *thd,uint want_access,TABLE_LIST *tables);
static bool check_lock_tables(THD *thd,TABLE_LIST *tables);
@@ -1571,22 +1574,23 @@ mysql_execute_command(void)
/* Check that the user isn't trying to change a password for another
user if he doesn't have UPDATE privilege to the MySQL database */
- List_iterator <LEX_USER> user_list(lex->users_list);
- LEX_USER *user;
- if(thd->user)
+ if (thd->user) // If not replication
+ {
+ LEX_USER *user;
+ List_iterator <LEX_USER> user_list(lex->users_list);
while ((user=user_list++))
+ {
+ if (user->password.str &&
+ (strcmp(thd->user,user->user.str) ||
+ user->host.str &&
+ my_strcasecmp(user->host.str, thd->host ? thd->host : thd->ip)))
{
- if (user->password.str &&
- (strcmp(thd->user,user->user.str) ||
- user->host.str && my_strcasecmp(user->host.str,
- thd->host ? thd->host : thd->ip)))
- {
- if (check_access(thd, UPDATE_ACL, "mysql",0,1))
- goto error;
- break; // We are allowed to do changes
- }
+ if (check_access(thd, UPDATE_ACL, "mysql",0,1))
+ goto error;
+ break; // We are allowed to do changes
}
-
+ }
+ }
if (tables)
{
if (grant_option && check_grant(thd,
@@ -1614,11 +1618,11 @@ mysql_execute_command(void)
res = mysql_grant(thd, lex->db, lex->users_list, lex->grant,
lex->sql_command == SQLCOM_REVOKE);
if(!res)
- {
- mysql_update_log.write(thd->query,thd->query_length);
- Query_log_event qinfo(thd, thd->query);
- mysql_bin_log.write(&qinfo);
- }
+ {
+ mysql_update_log.write(thd->query,thd->query_length);
+ Query_log_event qinfo(thd, thd->query);
+ mysql_bin_log.write(&qinfo);
+ }
}
break;
}
@@ -1853,12 +1857,27 @@ mysql_init_query(THD *thd)
thd->lex.table_list.first=0;
thd->lex.table_list.next= (byte**) &thd->lex.table_list.first;
- thd->lex.proc_list.first=0; // Needed by sql_select
thd->fatal_error=0; // Safety
thd->last_insert_id_used=thd->query_start_used=thd->insert_id_used=0;
DBUG_VOID_RETURN;
}
+void
+mysql_init_select(LEX *lex)
+{
+ lex->where=lex->having=0;
+ lex->select_limit=current_thd->default_select_limit;
+ lex->offset_limit=0L;
+ lex->options=0;
+ lex->exchange = 0;
+ lex->proc_list.first=0;
+ lex->order_list.elements=lex->group_list.elements=0;
+ lex->order_list.first=0;
+ lex->order_list.next= (byte**) &lex->order_list.first;
+ lex->group_list.first=0;
+ lex->group_list.next= (byte**) &lex->group_list.first;
+}
+
void
mysql_parse(THD *thd,char *inBuf,uint length)
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 030b6f6fb5c..749bbb5b1f5 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -1939,35 +1939,37 @@ get_best_combination(JOIN *join)
}
else
{
- for (i=0 ; i < keyparts ; keyuse++,i++)
- {
- while (keyuse->keypart != i ||
- ((~used_tables) & keyuse->used_tables))
- keyuse++; /* Skipp other parts */
-
- uint maybe_null= test(keyinfo->key_part[i].null_bit);
- j->ref.items[i]=keyuse->val; // Save for cond removal
- if (!keyuse->used_tables &&
- !(join->select_options & SELECT_DESCRIBE))
- { // Compare against constant
- store_key_item *tmp=new store_key_item(keyinfo->key_part[i].field,
- (char*)key_buff + maybe_null,
- maybe_null ?
- (char*) key_buff : 0,
- keyinfo->key_part[i].length,
- keyuse->val);
- if (current_thd->fatal_error)
- {
- return TRUE;
+ THD *thd=current_thd;
+ for (i=0 ; i < keyparts ; keyuse++,i++)
+ {
+ while (keyuse->keypart != i ||
+ ((~used_tables) & keyuse->used_tables))
+ keyuse++; /* Skip other parts */
+
+ uint maybe_null= test(keyinfo->key_part[i].null_bit);
+ j->ref.items[i]=keyuse->val; // Save for cond removal
+ if (!keyuse->used_tables &&
+ !(join->select_options & SELECT_DESCRIBE))
+ { // Compare against constant
+ store_key_item *tmp=new store_key_item(keyinfo->key_part[i].field,
+ (char*)key_buff +
+ maybe_null,
+ maybe_null ?
+ (char*) key_buff : 0,
+ keyinfo->key_part[i].length,
+ keyuse->val);
+ if (thd->fatal_error)
+ {
+ return TRUE;
+ }
+ tmp->copy();
}
- tmp->copy();
+ else
+ *ref_key++= get_store_key(keyuse,join->const_table_map,
+ &keyinfo->key_part[i],
+ (char*) key_buff,maybe_null);
+ key_buff+=keyinfo->key_part[i].store_length;
}
- else
- *ref_key++= get_store_key(keyuse,join->const_table_map,
- &keyinfo->key_part[i],
- (char*) key_buff,maybe_null);
- key_buff+=keyinfo->key_part[i].store_length;
- }
} /* not ftkey */
*ref_key=0; // end_marker
if (j->type == JT_FT) /* no-op */;
@@ -2319,8 +2321,11 @@ join_free(JOIN *join)
if (join->table)
{
- /* only sorted table is cached */
- if (join->tables > join->const_tables)
+ /*
+ Only a sorted table may be cached. This sorted table is always the
+ first non const table in join->table
+ */
+ if (join->tables > join->const_tables) // Test for not-const tables
free_io_cache(join->table[join->const_tables]);
for (tab=join->join_tab,end=tab+join->tables ; tab != end ; tab++)
{
@@ -2858,23 +2863,24 @@ remove_eq_conds(COND *cond,Item::cond_result *cond_value)
Item_func_isnull *func=(Item_func_isnull*) cond;
Item **args= func->arguments();
+ THD *thd=current_thd;
if (args[0]->type() == Item::FIELD_ITEM)
{
Field *field=((Item_field*) args[0])->field;
if (field->flags & AUTO_INCREMENT_FLAG && !field->table->maybe_null &&
- (current_thd->options & OPTION_AUTO_IS_NULL) &&
- current_thd->insert_id())
+ (thd->options & OPTION_AUTO_IS_NULL) &&
+ thd->insert_id())
{
COND *new_cond;
if ((new_cond= new Item_func_eq(args[0],
new Item_int("last_insert_id()",
- current_thd->insert_id(),
+ thd->insert_id(),
21))))
{
cond=new_cond;
- cond->fix_fields(current_thd,0);
+ cond->fix_fields(thd,0);
}
- current_thd->insert_id(0); // Clear for next request
+ thd->insert_id(0); // Clear for next request
}
/* fix to replace 'NULL' dates with '0' (shreeve@uci.edu) */
else if (((field->type() == FIELD_TYPE_DATE) ||
@@ -2885,7 +2891,7 @@ remove_eq_conds(COND *cond,Item::cond_result *cond_value)
if ((new_cond= new Item_func_eq(args[0],new Item_int("0", 0, 2))))
{
cond=new_cond;
- cond->fix_fields(current_thd,0);
+ cond->fix_fields(thd,0);
}
}
}
@@ -6229,6 +6235,11 @@ static bool add_ref_to_table_cond(THD *thd, JOIN_TAB *join_tab)
}
if (thd->fatal_error)
DBUG_RETURN(TRUE);
+
+ /*
+    Here we pass 0 as the THD argument to fix_fields, so it does not
+    need to do any stack checking (this was already done in the initial
+    fix_fields call).
+ */
cond->fix_fields((THD *) 0,(TABLE_LIST *) 0);
if (join_tab->select)
{
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 6e39a1a962b..57ba100ef96 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -709,6 +709,7 @@ bool close_cached_table(THD *thd,TABLE *table)
DBUG_RETURN(result);
}
+
int mysql_repair_table(THD* thd, TABLE_LIST* tables, HA_CHECK_OPT* check_opt)
{
TABLE_LIST *table;
@@ -1144,6 +1145,11 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name,
}
if (alter)
{
+ if (def->sql_type == FIELD_TYPE_BLOB)
+ {
+ my_error(ER_BLOB_CANT_HAVE_DEFAULT,MYF(0),def->change);
+ DBUG_RETURN(-1);
+ }
def->def=alter->def; // Use new default
alter_it.remove();
}
@@ -1504,6 +1510,8 @@ copy_data_between_tables(TABLE *from,TABLE *to,List<create_field> &create,
to->file->external_lock(thd,F_WRLCK);
to->file->extra(HA_EXTRA_WRITE_CACHE);
+ from->file->info(HA_STATUS_VARIABLE);
+ to->file->deactivate_non_unique_index(from->file->records);
List_iterator<create_field> it(create);
create_field *def;
@@ -1554,6 +1562,8 @@ copy_data_between_tables(TABLE *from,TABLE *to,List<create_field> &create,
to->file->print_error(tmp_error,MYF(0));
error=1;
}
+ if (to->file->activate_all_index(thd))
+ error=1;
if (ha_commit(thd) || to->file->external_lock(thd,F_UNLCK))
error=1;
*copied= found_count;
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 21f7ec1c48a..006d829005f 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -680,17 +680,7 @@ create3:
/* empty*/ {}
| opt_duplicate SELECT_SYM
{
- LEX *lex=Lex;
- lex->where=lex->having=0;
- lex->select_limit=current_thd->default_select_limit;
- lex->offset_limit=0L;
- lex->options=0;
- lex->exchange = 0;
- lex->order_list.elements=lex->group_list.elements=0;
- lex->order_list.first=0;
- lex->order_list.next= (byte**) &lex->order_list.first;
- lex->group_list.first=0;
- lex->group_list.next= (byte**) &lex->group_list.first;
+ mysql_init_select(Lex);
}
select_options select_item_list opt_select_from {}
@@ -1149,17 +1139,8 @@ select:
SELECT_SYM
{
LEX *lex=Lex;
- lex->where=lex->having=0;
- lex->select_limit=current_thd->default_select_limit;
- lex->offset_limit=0L;
- lex->options=0;
lex->sql_command= SQLCOM_SELECT;
- lex->exchange = 0;
- lex->order_list.elements=lex->group_list.elements=0;
- lex->order_list.first=0;
- lex->order_list.next= (byte**) &lex->order_list.first;
- lex->group_list.first=0;
- lex->group_list.next= (byte**) &lex->group_list.first;
+ mysql_init_select(lex);
}
select_options select_item_list select_into
@@ -1973,17 +1954,9 @@ insert_values:
| SELECT_SYM
{
LEX *lex=Lex;
- lex->where=lex->having=0;
- lex->select_limit=current_thd->default_select_limit;
- lex->offset_limit=0L;
- lex->options=0;
- lex->order_list.elements=lex->group_list.elements=0;
- lex->order_list.first=0;
- lex->order_list.next= (byte**) &lex->order_list.first;
- lex->group_list.first=0;
- lex->group_list.next= (byte**) &lex->group_list.first;
lex->sql_command = (lex->sql_command == SQLCOM_INSERT ?
SQLCOM_INSERT_SELECT : SQLCOM_REPLACE_SELECT);
+ mysql_init_select(lex);
}
select_options select_item_list select_from {}
diff --git a/sql/structs.h b/sql/structs.h
index b32f957da25..edd1a2a68c4 100644
--- a/sql/structs.h
+++ b/sql/structs.h
@@ -70,6 +70,9 @@ typedef struct st_key {
KEY_PART_INFO *key_part;
char *name; /* Name of key */
ulong *rec_per_key; /* Key part distribution */
+ union {
+ uint bdb_return_if_eq;
+ } handler;
} KEY;
diff --git a/strings/ctype-sjis.c b/strings/ctype-sjis.c
index e59a6f59dbb..119443a0c95 100644
--- a/strings/ctype-sjis.c
+++ b/strings/ctype-sjis.c
@@ -224,7 +224,7 @@ int my_strnncoll_sjis(const uchar *s1, int len1, const uchar *s2, int len2)
int my_strcoll_sjis(const uchar *s1, const uchar *s2)
{
- return my_strnncoll_sjis(s1, strlen(s1), s2, strlen(s2));
+ return (uint) my_strnncoll_sjis(s1,(uint) strlen(s1), s2,(uint) strlen(s2));
}
int my_strnxfrm_sjis(uchar *dest, uchar *src, int len, int srclen)
@@ -245,7 +245,7 @@ int my_strnxfrm_sjis(uchar *dest, uchar *src, int len, int srclen)
int my_strxfrm_sjis(uchar *dest, uchar *src, int len)
{
- return my_strnxfrm_sjis(dest, src, len, strlen(src));
+ return my_strnxfrm_sjis(dest, src, len, (uint) strlen(src));
}
diff --git a/strings/ctype-tis620.c b/strings/ctype-tis620.c
index bc80819c2a9..a495f15c846 100644
--- a/strings/ctype-tis620.c
+++ b/strings/ctype-tis620.c
@@ -12,7 +12,7 @@
*/
-/* $Id$
+/*
This file is basicly tis620 character sets with some extra functions
for tis-620 handling
*/
@@ -443,7 +443,7 @@ static uchar* thai2sortable(const uchar * tstr,uint len)
uint bufSize;
len = (uint) strnlen((char*) tstr,len);
- bufSize = buffsize((char*) tstr);
+ bufSize = (uint) buffsize((char*) tstr);
if(!(pRight1 = (uchar *)malloc(sizeof(uchar) * bufSize))) {
return( (uchar*) tstr);
}
@@ -530,7 +530,7 @@ int my_strnxfrm_tis620(uchar * dest, uchar * src, int len, int srclen)
{
uint bufSize;
uchar *tmp;
- bufSize = buffsize((char*)src);
+ bufSize = (uint) buffsize((char*)src);
tmp = thai2sortable(src,srclen);
set_if_smaller(bufSize,(uint) len);
memcpy((uchar *)dest, tmp, bufSize);
@@ -563,7 +563,7 @@ int my_strxfrm_tis620(uchar * dest, uchar * src, int len)
uint bufSize;
uchar *tmp;
- bufSize = buffsize((char*) src);
+ bufSize = (uint) buffsize((char*) src);
tmp = thai2sortable(src, len);
memcpy((uchar *) dest, tmp, bufSize);
free(tmp);
@@ -586,7 +586,7 @@ my_bool my_like_range_tis620(const char *ptr, uint ptr_length, pchar escape,
uint tbuff_length;
tbuff = (char*) (tc=thai2sortable((uchar*) ptr, ptr_length));
- tbuff_length = buffsize(ptr);
+ tbuff_length = (uint) buffsize(ptr);
end = tbuff + tbuff_length;
for(;tbuff != end && min_str != min_end; tbuff++)
{
diff --git a/strings/t_ctype.h b/strings/t_ctype.h
index f60e6d98272..6aca3fa911c 100644
--- a/strings/t_ctype.h
+++ b/strings/t_ctype.h
@@ -9,8 +9,8 @@
"as is" without express or implied warranty.
*/
-/* $Id$
- LC_COLLATE category + Level information
+/*
+ LC_COLLATE category + Level information
*/
#ifndef _t_ctype_h
diff --git a/support-files/mysql.spec.sh b/support-files/mysql.spec.sh
index bd50f3cfc4b..b0fdb1592c3 100644
--- a/support-files/mysql.spec.sh
+++ b/support-files/mysql.spec.sh
@@ -162,6 +162,8 @@ sh -c "PATH=\"${MYSQL_BUILD_PATH:-/bin:/usr/bin}\" \
--with-comment=\"Official MySQL RPM\";
# Add this for more debugging support
# --with-debug
+ # Add this for MyISAM RAID support:
+ # --with-raid
"
# benchdir does not fit in above model. Maybe a separate bench distribution
diff --git a/vio/viotest-ssl.cc b/vio/viotest-ssl.cc
index a708831ba67..a3ad92a7c9c 100644
--- a/vio/viotest-ssl.cc
+++ b/vio/viotest-ssl.cc
@@ -6,8 +6,6 @@
#include <unistd.h>
-VIO_RCSID(vio, viotest_ssl, "$Id$")
-
void
fatal_error( const char* r)
{
diff --git a/vio/viotest-sslconnect.cc b/vio/viotest-sslconnect.cc
index 505aac024f7..89e1a6e7dfe 100644
--- a/vio/viotest-sslconnect.cc
+++ b/vio/viotest-sslconnect.cc
@@ -13,8 +13,6 @@
#include <unistd.h>
-VIO_RCSID(vio, viotest_sslconnect, "$Id$")
-
void
fatal_error( const char* r)
{
diff --git a/vio/viotest.cc b/vio/viotest.cc
index f675267665d..490a9ca6694 100644
--- a/vio/viotest.cc
+++ b/vio/viotest.cc
@@ -11,8 +11,6 @@
#include <string.h>
-VIO_RCSID(vio, Vio, "$Id$")
-
VIO_NS_USING;
int