summaryrefslogtreecommitdiff
path: root/tools/dev/benchmarks
diff options
context:
space:
mode:
authorLorry Tar Creator <lorry-tar-importer@lorry>2017-08-05 16:22:51 +0000
committerLorry Tar Creator <lorry-tar-importer@lorry>2017-08-05 16:22:51 +0000
commitcf46733632c7279a9fd0fe6ce26f9185a4ae82a9 (patch)
treeda27775a2161723ef342e91af41a8b51fedef405 /tools/dev/benchmarks
parentbb0ef45f7c46b0ae221b26265ef98a768c33f820 (diff)
downloadsubversion-tarball-master.tar.gz
Diffstat (limited to 'tools/dev/benchmarks')
-rw-r--r--tools/dev/benchmarks/RepoPerf/ClearMemory.cpp55
-rw-r--r--tools/dev/benchmarks/RepoPerf/TimeWin.cpp118
-rw-r--r--tools/dev/benchmarks/RepoPerf/copy_repo.py313
-rw-r--r--tools/dev/benchmarks/RepoPerf/win_repo_bench.py268
-rwxr-xr-xtools/dev/benchmarks/large_dirs/create_bigdir.sh10
5 files changed, 759 insertions, 5 deletions
diff --git a/tools/dev/benchmarks/RepoPerf/ClearMemory.cpp b/tools/dev/benchmarks/RepoPerf/ClearMemory.cpp
new file mode 100644
index 0000000..06ef6f5
--- /dev/null
+++ b/tools/dev/benchmarks/RepoPerf/ClearMemory.cpp
@@ -0,0 +1,55 @@
+/* ClearMemory.cpp --- A simple Windows memory cleaning tool
+ *
+ * ====================================================================
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ * ====================================================================
+ */
+
+#include "targetver.h"
+
+#include <Windows.h>
+
+#include <stdio.h>
+#include <tchar.h>
+
+int _tmain(int argc, _TCHAR* argv[])
+{
+ // Get the current memory usage stats
+ MEMORYSTATUSEX statex;
+ statex.dwLength = sizeof (statex);
+ GlobalMemoryStatusEx(&statex);
+
+ // (Clean) cache memory will be listed under "available".
+ // So, allocate all available RAM, touch it and release it again.
+ unsigned char *memory = new unsigned char[statex.ullAvailPhys];
+ if (memory)
+ {
+ // Make every page dirty.
+ for (DWORDLONG i = 0; i < statex.ullAvailPhys; i += 4096)
+ memory[i]++;
+
+ // Give everything back to the OS.
+ // The in-RAM file read cache is empty now. There may still be bits in
+ // the swap file as well as dirty write buffers. But we don't care
+ // much about these here ...
+      delete[] memory;
+ }
+
+ return 0;
+}
+
diff --git a/tools/dev/benchmarks/RepoPerf/TimeWin.cpp b/tools/dev/benchmarks/RepoPerf/TimeWin.cpp
new file mode 100644
index 0000000..4acab99
--- /dev/null
+++ b/tools/dev/benchmarks/RepoPerf/TimeWin.cpp
@@ -0,0 +1,118 @@
+/* TimeWin.cpp --- A simple Windows tool inspired by Unix' "time".
+ *
+ * ====================================================================
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ * ====================================================================
+ */
+
+#include "targetver.h"
+
+#include <Windows.h>
+
+#include <stdio.h>
+#include <tchar.h>
+
+void usage()
+{
+ _tprintf(_T("Execute a command, redirect its stdout to NUL and print\n"));
+ _tprintf(_T("execution times ELAPSED\\tUSER\\tKERNEL in seconds.\n"));
+ _tprintf(_T("\n"));
+ _tprintf(_T("Usage: TimeWin.EXE COMMAND [PARAMETERS]\n"));
+}
+
+LPCTSTR skip_first_arg(LPCTSTR targv)
+{
+ LPCTSTR s = _tcschr(targv, ' ');
+ while (s && *s == ' ')
+ ++s;
+
+ return s;
+}
+
+double as_seconds(FILETIME time)
+{
+ return (double)*reinterpret_cast<LONGLONG *>(&time) / 10000000.0;
+}
+
+int _tmain(int argc, LPTSTR argv[])
+{
+ // Minimal CL help support
+ if (argc < 2 || _tcscmp(argv[1], _T("/?")) == 0)
+ {
+ usage();
+ return 0;
+ }
+
+ // Get a file handle for NUL.
+ SECURITY_ATTRIBUTES sa;
+ sa.nLength = sizeof(sa);
+ sa.lpSecurityDescriptor = NULL;
+ sa.bInheritHandle = TRUE;
+
+ HANDLE nul = CreateFile(_T("nul"), FILE_APPEND_DATA, FILE_SHARE_WRITE,
+ &sa, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
+
+ // Construct a process startup info that uses the same handles as this
+ // one but redirects stdout to NUL.
+ STARTUPINFO startup_info;
+ GetStartupInfo(&startup_info);
+ startup_info.dwFlags |= STARTF_USESTDHANDLES;
+ startup_info.hStdOutput = nul;
+
+ // Execute the command line.
+ PROCESS_INFORMATION process_info;
+  CreateProcess(NULL, _tcsdup(skip_first_arg(GetCommandLine())), NULL, NULL,
+ TRUE, NORMAL_PRIORITY_CLASS, NULL, NULL, &startup_info,
+ &process_info);
+
+ // Get a handle with the needed access rights to the child process.
+ HANDLE child = INVALID_HANDLE_VALUE;
+ DuplicateHandle(GetCurrentProcess(), process_info.hProcess,
+ GetCurrentProcess(), &child,
+ PROCESS_QUERY_INFORMATION | SYNCHRONIZE, FALSE, 0);
+
+ // Wait for the child to finish.
+ // If there was problem earlier (application not found etc.), this will fail.
+ bool success = false;
+ if (WaitForSingleObject(child, INFINITE) == WAIT_OBJECT_0)
+ {
+ // Finally, query the timers and show the result
+ FILETIME start_time, end_time, user_time, kernel_time;
+ if (GetProcessTimes(child, &start_time, &end_time, &kernel_time,
+ &user_time))
+ {
+ _tprintf(_T("%1.3f\t%1.3f\t%1.3f\n"),
+ as_seconds(end_time) - as_seconds(start_time),
+ as_seconds(user_time), as_seconds(kernel_time));
+ success = true;
+ }
+ }
+
+ // In case of failure, give some indication that something went wrong.
+ if (!success)
+    _tprintf(_T("?.???\t?.???\t?.???\n"));
+
+ // Be good citizens and clean up our mess
+ CloseHandle(child);
+ CloseHandle(process_info.hThread);
+ CloseHandle(process_info.hProcess);
+
+ CloseHandle(nul);
+
+ return 0;
+}
diff --git a/tools/dev/benchmarks/RepoPerf/copy_repo.py b/tools/dev/benchmarks/RepoPerf/copy_repo.py
new file mode 100644
index 0000000..a95a82d
--- /dev/null
+++ b/tools/dev/benchmarks/RepoPerf/copy_repo.py
@@ -0,0 +1,313 @@
+#!/usr/bin/env python
+#
+# copy_repo.py: create multiple, interleaved copies of a set of repositories.
+#
+# Subversion is a tool for revision control.
+# See http://subversion.apache.org for more information.
+#
+# ====================================================================
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+######################################################################
+
+# General modules
+import os
+import random
+import shutil
+import sys
+
+class Separators:
+ """ This class is a container for dummy / filler files.
+ It will be used to create spaces between repository
+ versions on disk, i.e. to simulate some aspect of
+ real-world FS fragmentation.
+
+ It gets initialized with some parent path as well as
+ the desired average file size and will create a new
+ such file with each call to write(). Automatic
+ sharding keeps FS specific overhead at bay. Call
+ cleanup() to eventually delete all dummy files. """
+
+ buffer = "A" * 4096
+ """ Write this non-NULL contents into the dummy files. """
+
+ def __init__(self, path, average_size):
+ """ Initialize and store all dummy files in a '__tmp'
+ sub-folder of PATH. The size of each dummy file
+ is a random value and will be slightly AVERAGE_SIZE
+ kBytes on average. A value of 0 will effectively
+ disable dummy file creation. """
+
+ self.path = os.path.join(path, '__tmp')
+ self.size = average_size
+ self.count = 0
+
+ if os.path.exists(self.path):
+ shutil.rmtree(self.path)
+
+ os.mkdir(self.path)
+
+ def write(self):
+ """ Add a new dummy file """
+
+ # Throw dice of a file size.
+ # Factor 1024 for kBytes, factor 2 for being an average.
+ size = (int)(float(self.size) * random.random() * 2 * 1024.0)
+
+ # Don't create empty files. This also implements the
+ # "average = 0 means no files" rule.
+ if size > 0:
+ self.count += 1
+
+ # Create a new shard for every 1000 files
+ subfolder = os.path.join(self.path, str(self.count / 1000))
+ if not os.path.exists(subfolder):
+ os.mkdir(subfolder)
+
+ # Create and write the file in 4k chunks.
+ # Writing full chunks will result in average file sizes
+ # being slightly above the SELF.SIZE. That's good enough
+ # for our purposes.
+ f = open(os.path.join(subfolder, str(self.count)), "wb")
+ while size > 0:
+ f.write(self.buffer)
+ size -= len(self.buffer)
+
+ f.close()
+
+ def cleanup(self):
+ """ Get rid of all the files (and folders) that we created. """
+
+ shutil.rmtree(self.path)
+
+class Repository:
+  """ Encapsulates key information of a repository. It is being
+ used for copy sources only and contains information about
+ its NAME, PATH, SHARD_SIZE, HEAD revision and MIN_UNPACKED_REV. """
+
+ def _read_config(self, filename):
+ """ Read and return all lines from FILENAME.
+ This will be used to read 'format', 'current' etc. . """
+
+ f = open(os.path.join(self.path, 'db', filename), "rb")
+ lines = f.readlines()
+ f.close()
+
+ return lines
+
+ def __init__(self, parent, name):
+ """ Constructor collecting everything we need to know about
+ the repository NAME within PARENT folder. """
+
+ self.name = name
+ self.path = os.path.join(parent, name)
+
+ self.shard_size = int(self._read_config('format')[1].split(' ')[2])
+ self.min_unpacked_rev = int(self._read_config('min-unpacked-rev')[0])
+ self.head = int(self._read_config('current')[0])
+
+ def needs_copy(self, revision):
+ """ Return True if REVISION is a revision in this repository
+ and is "directly copyable", i.e. is either non-packed or
+ the first rev in a packed shard. Everything else is either
+ not a valid rev or already gets / got copied as part of
+ some packed shard. """
+
+ if revision > self.head:
+ return False
+ if revision < self.min_unpacked_rev:
+ return revision % self.shard_size == 0
+
+ return True
+
+ @classmethod
+ def is_repository(cls, path):
+ """ Quick check that PATH is (probably) a repository.
+ This is mainly to filter out aux files put next to
+ (not inside) the repositories to copy. """
+
+ format_path = os.path.join(path, 'db', 'format')
+ return os.path.isfile(format_path)
+
+class Multicopy:
+ """ Helper class doing the actual copying. It copies individual
+ revisions and packed shards from the one source repository
+ to multiple copies of it. The copies have the same name
+ as the source repo but with numbers 0 .. N-1 appended to it.
+
+ The copy process is being initiated by the constructor
+ (copies the repo skeleton w/o revision contents). Revision
+ contents is then copied by successive calls to the copy()
+ method. """
+
+ def _init_copy(self, number):
+ """ Called from the constructor, this will copy SELF.SOURCE_REPO
+ into NUMBER new repos below SELF.DEST_BASE but omit everything
+ below db/revs and db/revprops. """
+
+ src = self.source_repo.path
+ dst = self.dest_base + str(number)
+
+ # Copy the repo skeleton w/o revs and revprops
+ shutil.copytree(src, dst, ignore=shutil.ignore_patterns('revs', 'revprops'))
+
+ # Add revs and revprops
+ self.dst_revs.append(os.path.join(dst, 'db', 'revs'))
+ self.dst_revprops.append(os.path.join(dst, 'db', 'revprops'))
+
+ os.mkdir(self.dst_revs[number])
+ os.mkdir(self.dst_revprops[number])
+
+ def _copy_packed_shard(self, shard, number):
+ """ Copy packed shard number SHARD from SELF.SOURCE_REPO to
+ the copy NUMBER below SELF.DEST_BASE. """
+
+ # Shards are simple subtrees
+ src_revs = os.path.join(self.src_revs, str(shard) + '.pack')
+ dst_revs = os.path.join(self.dst_revs[number], str(shard) + '.pack')
+ src_revprops = os.path.join(self.src_revprops, str(shard) + '.pack')
+ dst_revprops = os.path.join(self.dst_revprops[number], str(shard) + '.pack')
+
+ shutil.copytree(src_revs, dst_revs)
+ shutil.copytree(src_revprops, dst_revprops)
+
+ # Special case: revprops of rev 0 are never packed => extra copy
+ if shard == 0:
+ src_revprops = os.path.join(self.src_revprops, '0')
+ dest_revprops = os.path.join(self.dst_revprops[number], '0')
+
+ shutil.copytree(src_revprops, dest_revprops)
+
+ def _copy_single_revision(self, revision, number):
+ """ Copy non-packed REVISION from SELF.SOURCE_REPO to the copy
+ NUMBER below SELF.DEST_BASE. """
+
+ shard = str(revision / self.source_repo.shard_size)
+
+ # Auto-create shard folder
+ if revision % self.source_repo.shard_size == 0:
+ os.mkdir(os.path.join(self.dst_revs[number], shard))
+ os.mkdir(os.path.join(self.dst_revprops[number], shard))
+
+ # Copy the rev file and the revprop file
+ src_rev = os.path.join(self.src_revs, shard, str(revision))
+ dest_rev = os.path.join(self.dst_revs[number], shard, str(revision))
+ src_revprop = os.path.join(self.src_revprops, shard, str(revision))
+ dest_revprop = os.path.join(self.dst_revprops[number], shard, str(revision))
+
+ shutil.copyfile(src_rev, dest_rev)
+ shutil.copyfile(src_revprop, dest_revprop)
+
+ def __init__(self, source, target_parent, count):
+ """ Initiate the copy process for the SOURCE repository to
+ be copied COUNT times into the TARGET_PARENT directory. """
+
+ self.source_repo = source
+ self.dest_base = os.path.join(target_parent, source.name)
+
+ self.src_revs = os.path.join(source.path, 'db', 'revs')
+ self.src_revprops = os.path.join(source.path, 'db', 'revprops')
+
+ self.dst_revs = []
+ self.dst_revprops = []
+ for i in range(0, count):
+ self._init_copy(i)
+
+ def copy(self, revision, number):
+ """ Copy (packed or non-packed) REVISION from SELF.SOURCE_REPO
+ to the copy NUMBER below SELF.DEST_BASE.
+
+ SELF.SOURCE_REPO.needs_copy(REVISION) must be True. """
+
+ if revision < self.source_repo.min_unpacked_rev:
+ self._copy_packed_shard(revision / self.source_repo.shard_size, number)
+ else:
+ self._copy_single_revision(revision, number)
+
+def copy_repos(src, dst, count, separator_size):
+ """ Under DST, create COUNT copies of all repositories immediately
+ below SRC.
+
+      All copies will be "interleaved" such that we copy each individual
+ revision / packed shard to all target repos first before
+ continuing with the next revision / packed shard. After each
+ round (revision / packed shard) insert a temporary file of
+ SEPARATOR_SIZE kBytes on average to add more spacing between
+ revisions. The temp files get automatically removed at the end.
+
+ Please note that this function will clear DST before copying
+ anything into it. """
+
+ # Remove any remnants from the target folder.
+ # (DST gets auto-created by the first repo copy.)
+ shutil.rmtree(dst)
+
+ # Repositories to copy and the respective copy utilities
+ repositories = []
+ copies = []
+
+ # Find repositories, initiate copies and determine the range of
+ # revisions to copy in total
+ max_revision = 0
+ for name in os.listdir(src):
+ if Repository.is_repository(os.path.join(src, name)):
+ repository = Repository(src, name)
+ repositories.append(repository)
+ copies.append(Multicopy(repository, dst, count))
+
+ if repository.head > max_revision:
+ max_revision = repository.head
+
+ # Temp file collection (spacers)
+ separators = Separators(dst, separator_size)
+
+ # Copy all repos in revision,number-major order
+ for revision in xrange(0, max_revision + 1):
+ for number in xrange(0, count):
+
+ any_copy = False
+ for i in xrange(0, len(repositories)):
+ if repositories[i].needs_copy(revision):
+ any_copy = True
+ copies[i].copy(revision, number)
+
+ # Don't add spacers when nothing got copied (REVISION is
+ # packed in all repositories).
+ if any_copy:
+ separators.write()
+
+ # Now that all data is in position, remove the spacers
+ separators.cleanup()
+
+def show_usage():
+ """ Write a simple CL docstring """
+
+ print "Copies and duplicates repositories in a way that mimics larger deployments."
+ print
+ print "Usage:"
+ print "copy_repo.py SRC DST COUNT SEPARATOR_SIZE"
+ print
+ print "SRC Immediate parent folder of all the repositories to copy."
+ print "DST Folder to copy into; current contents will be lost."
+ print "COUNT Number of copies to create of each source repository."
+ print "SEPARATOR_SIZE Additional spacing, in kBytes, between revisions."
+
+#main function
+if len(sys.argv) == 5:
+ copy_repos(sys.argv[1], sys.argv[2], int(sys.argv[3]), int(sys.argv[4]))
+else:
+ show_usage()
diff --git a/tools/dev/benchmarks/RepoPerf/win_repo_bench.py b/tools/dev/benchmarks/RepoPerf/win_repo_bench.py
new file mode 100644
index 0000000..d470a04
--- /dev/null
+++ b/tools/dev/benchmarks/RepoPerf/win_repo_bench.py
@@ -0,0 +1,268 @@
+#!/usr/bin/env python
+#
+# win_repo_bench.py: run repository / server performance tests on Windows.
+#
+# Subversion is a tool for revision control.
+# See http://subversion.apache.org for more information.
+#
+# ====================================================================
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+######################################################################
+
+# General modules
+import os
+import shutil
+import sys
+import subprocess
+import time
+
+from win32com.shell import shell, shellcon
+
+# Adapt these paths to your needs
+
+# Contains all the REPOSITORIES
+repo_parent = "C:\\repos"
+
+# Where to create working copies
+wc_path = "C:\\wc"
+exe_path = "C:\\develop\\Subversion\\trunk\\Release"
+apache_path = "C:\\develop\\Subversion"
+
+# Test these repositories and in this order.
+# Actual repository names have numbers 0 .. REPETITIONS-1 appended to them
+repositories = ["ruby-f6-nonpacked", "ruby-f7-nonpacked",
+ "ruby-f6-packed", "ruby-f7-packed",
+ "bsd-f6-nonpacked", "bsd-f7-nonpacked",
+ "bsd-f6-packed", "bsd-f7-packed"]
+
+# Basically lists the RA backends to test but as long as all repositories
+# can be accessed using any of them, arbitrary URLs are possible.
+prefixes = ["svn://localhost/", "http://localhost/svn/", "file:///C:/repos/"]
+
+# Number of time to repeat the tests. For each iteration, there must be
+# a separate copy of all repositories.
+repetitions = 3
+
+# Server configurations to test
+configurations = ['slow', 'medium', 'fast']
+svnserve_params = {
+ 'slow':"",
+ 'medium':"-M 256" ,
+ 'fast':"-M 1024 -c 0 --cache-revprops yes --block-read yes --client-speed 1000"
+}
+
+
+def clear_memory():
+ """ Clear in-RAM portion of the file / disk cache """
+ subprocess.call(["ClearMemory.exe"])
+
+def start_server(prefix, config):
+ """ Depending on the url PREFIX, start the corresponding server with the
+ given CONFIGuration. file: and http: access will actually have been
+ configured by set_config(). """
+
+ if prefix[:4] == "svn:":
+ exe = os.path.join(exe_path, "svnserve.exe")
+ command = "cmd.exe /c start " + exe + " -dr " + repo_parent + \
+ " " + svnserve_params[config]
+ subprocess.call(command)
+ time.sleep(2)
+ elif prefix[:5] == "http:":
+ exe = os.path.join(apache_path, 'bin', 'httpd.exe')
+ subprocess.call(exe + " -k start")
+ time.sleep(2)
+
+def stop_server(prefix):
+ """ Depending on the url PREFIX, stop / kill the corresponding server. """
+
+ if prefix[:4] == "svn:":
+ subprocess.call("cmd.exe /c taskkill /im svnserve.exe /f > nul 2>&1")
+ time.sleep(1)
+ elif prefix[:5] == "http:":
+ exe = os.path.join(apache_path, 'bin', 'httpd.exe')
+ subprocess.call(exe + " -k stop")
+ time.sleep(1)
+
+def run_cs_command(state, config, repository, prefix, args):
+ """ Run the client-side command given in ARGS. Log the STATE of the
+ caches, the CONFIG we are using, the REPOSITORY, the url PREFIX
+ and finally the execution times. """
+
+ # Make sure we can create a new working copy if we want to.
+ if os.path.exists(wc_path):
+ shutil.rmtree(wc_path)
+
+ # Select the client to use.
+ if ('null-export' in args) or ('null-log' in args):
+ exe = os.path.join(exe_path, "svn-bench.exe")
+ else:
+ exe = os.path.join(exe_path, "svn.exe")
+
+ # Display the operation
+ repo_title = repository.replace('nonpacked', 'nopack')
+ print state, "\t", repo_title, "\t", prefix, "\t", config, "\t",
+ sys.stdout.flush()
+
+ # Execute the command and show the execution times
+ subprocess.call(["TimeWin.exe", exe] + args)
+
+
+def run_test_cs_sequence(config, repository, run, prefix, command, args):
+ """ Run the client-side COMMAND with the given ARGS in various stages
+ of cache heat-up. Execute the test with server CONFIG on REPOSITORY
+ with the given url PREFIX. """
+
+ # Build the full URL to use. Exports operate on the main dev line only.
+ url = prefix + repository + str(run)
+ if (command == 'export') or (command == 'null-export'):
+ if repository[:3] == 'bsd':
+ url += '/head'
+ else:
+ url += '/trunk'
+
+ # Full set of command arguments
+ args = [command, url] + args
+
+ # Free up caches best we can.
+ clear_memory()
+
+ # Caches are quite cool now and ready to take up new data
+ start_server(prefix, config)
+ run_cs_command("Cold", config, repository, prefix, args)
+ stop_server(prefix)
+
+ # OS caches are quite hot now.
+ # Run operation from hot OS caches but cold SVN caches.
+ start_server(prefix, config)
+ run_cs_command("WarmOS", config, repository, prefix, args)
+ stop_server(prefix)
+
+ # OS caches may be even hotter now.
+ # Run operation from hot OS caches but cold SVN caches.
+ start_server(prefix, config)
+ run_cs_command("HotOS", config, repository, prefix, args)
+
+ # Keep server process and thus the warmed up SVN caches.
+ # Run operation from hot OS and SVN caches.
+ run_cs_command("WrmSVN", config, repository, prefix, args)
+ run_cs_command("HotSVN", config, repository, prefix, args)
+ stop_server(prefix)
+
+
+def set_config(config):
+ """ Switch configuration files to CONFIG. This overwrites the client
+ config file with config.$CONFIG and the server config file with
+ subversion.$CONFIG.conf. """
+
+ appdata = shell.SHGetFolderPath(0, shellcon.CSIDL_APPDATA, None, 0)
+ svn_config_folder = os.path.join(appdata, 'Subversion')
+ svn_config_file = os.path.join(svn_config_folder, 'config')
+ svn_config_template = svn_config_file + '.' + config
+
+ shutil.copyfile(svn_config_template, svn_config_file)
+
+ apache_config_folder = os.path.join(apache_path, 'conf', 'extra')
+ apache_config_file = os.path.join(apache_config_folder, 'subversion.conf')
+ apache_config_template = os.path.join(apache_config_folder,
+ 'subversion.' + config + '.conf')
+
+ shutil.copyfile(apache_config_template, apache_config_file)
+
+
+def run_test_cs_configurations(command, args):
+ """ Run client COMMAND with basic arguments ARGS in all configurations
+ repeatedly with all servers on all repositories. """
+
+ print
+ print command
+ print
+
+ for config in configurations:
+ set_config(config)
+ for prefix in prefixes:
+ # These two must be the innermost loops and must be in that order.
+ # It gives us the coldest caches and the least temporal favoritism.
+ for run in range(0, repetitions):
+ for repository in repositories:
+ run_test_cs_sequence(config, repository, run, prefix, command, args)
+
+def run_admin_command(state, config, repository, args):
+ """ Run the svnadmin command given in ARGS. Log the STATE of the
+ caches, the CONFIG we are using, the REPOSITORY and finally
+ the execution times. """
+
+ exe = os.path.join(exe_path, "svnadmin.exe")
+
+ if config == 'medium':
+ extra = ['-M', '256']
+ elif config == 'fast':
+ extra = ['-M', '1024']
+ else:
+ extra = []
+
+ print state, "\t", repository, "\t", config, "\t",
+ sys.stdout.flush()
+ subprocess.call(["TimeWin.exe", exe] + args + extra)
+
+def run_test_admin_sequence(config, repository, run, command, args):
+ """ Run the svnadmin COMMAND with the given ARGS in various stages
+ of cache heat-up. Execute the test with server CONFIG on
+ REPOSITORY. """
+
+ # Full set of command arguments
+ path = os.path.join(repo_parent, repository + str(run))
+ args = [command, path] + args
+
+ # Free up caches best we can.
+ clear_memory()
+
+ # svnadmin runs can be quite costly and are usually CPU-bound.
+ # Test with "cold" and "hot" CPU caches only.
+ run_admin_command("Cold", config, repository, args)
+ run_admin_command("Hot", config, repository, args)
+
+
+def run_test_admin_configurations(command, args):
+ """ Run svnadmin COMMAND with basic arguments ARGS in all configurations
+ repeatedly on all repositories. """
+
+ print
+ print command
+ print
+
+ for config in configurations:
+ # These two must be the innermost loops and must be in that order.
+ # It gives us the coldest caches and the least temporal favoritism.
+ for run in range(0, repetitions):
+ for repository in repositories:
+ run_test_admin_sequence(config, repository, run, command, args)
+
+
+def bench():
+ """ Run all performance tests. """
+
+ run_test_cs_configurations('log', ['-v', '--limit', '50000'])
+ run_test_cs_configurations('export', [wc_path, '-q'])
+
+ run_test_cs_configurations('null-log', ['-v', '--limit', '50000', '-q'])
+ run_test_cs_configurations('null-export', ['-q'])
+
+ run_test_admin_configurations('dump', ['-q'])
+
+# main function
+bench()
diff --git a/tools/dev/benchmarks/large_dirs/create_bigdir.sh b/tools/dev/benchmarks/large_dirs/create_bigdir.sh
index a389dcc..c2830c8 100755
--- a/tools/dev/benchmarks/large_dirs/create_bigdir.sh
+++ b/tools/dev/benchmarks/large_dirs/create_bigdir.sh
@@ -29,7 +29,7 @@ SVNPATH="$('pwd')/subversion"
# Comment the SVNSERVE line to use file:// instead of svn://.
SVN=${SVNPATH}/svn/svn
-SVNADMIN=${SVNPATH}/svnadmin/svnadmin
+SVNADMIN=${SVNPATH}/svnadmin/svnadmin
SVNSERVE=${SVNPATH}/svnserve/svnserve
# VALGRIND="valgrind --tool=callgrind"
@@ -45,7 +45,7 @@ REPOROOT=/dev/shm
FILECOUNT=1
MAXCOUNT=20000
-# only 1.7 supports server-side caching and uncompressed data transfer
+# only 1.7 supports server-side caching and uncompressed data transfer
SERVEROPTS="-c 0 -M 400"
@@ -162,7 +162,7 @@ run_svn_get() {
fi
}
-# main loop
+# main loop
while [ $FILECOUNT -lt $MAXCOUNT ]; do
echo "Processing $FILECOUNT files in the same folder"
@@ -172,7 +172,7 @@ while [ $FILECOUNT -lt $MAXCOUNT ]; do
mkdir $WC/$FILECOUNT
for i in 1 $sequence; do
echo "File number $i" > $WC/$FILECOUNT/$i
- done
+ done
printf "\tAdding files ... \t"
run_svn add $FILECOUNT -q
@@ -182,7 +182,7 @@ while [ $FILECOUNT -lt $MAXCOUNT ]; do
printf "\tCommit files ... \t"
run_svn_ci $FILECOUNT add
-
+
printf "\tListing files ... \t"
run_svn ls $FILECOUNT