From f3765db04b903b3671733e07cf1541a51966dd14 Mon Sep 17 00:00:00 2001 From: Lorry Tar Creator Date: Fri, 21 Feb 2014 04:46:49 +0000 Subject: Imported from /home/lorry/working-area/delta_python-packages_posix-ipc-tarball/posix_ipc-0.9.8.tar.gz. --- INSTALL | 7 + LICENSE | 24 + PKG-INFO | 44 + README | 20 + ReadMe.html | 869 +++++++++++++ VERSION | 1 + demo/ReadMe.txt | 54 + demo/SampleIpcConversation.png | Bin 0 -> 11909 bytes demo/cleanup.py | 22 + demo/conclusion.c | 146 +++ demo/conclusion.py | 86 ++ demo/make_all.sh | 12 + demo/md5.c | 381 ++++++ demo/md5.h | 91 ++ demo/params.txt | 20 + demo/premise.c | 223 ++++ demo/premise.py | 118 ++ demo/utils.c | 119 ++ demo/utils.h | 18 + demo/utils.py | 90 ++ demo2/ReadMe.txt | 41 + demo2/SampleIpcConversation.png | Bin 0 -> 11909 bytes demo2/cleanup.py | 15 + demo2/conclusion.py | 65 + demo2/params.txt | 4 + demo2/premise.py | 75 ++ demo2/utils.py | 42 + demo3/ReadMe.txt | 12 + demo3/cleanup.py | 13 + demo3/one_shot_signal.py | 45 + demo3/one_shot_thread.py | 38 + demo3/repeating_signal.py | 46 + demo3/repeating_thread.py | 40 + demo3/utils.py | 15 + demo4/ReadMe.txt | 15 + demo4/child.py | 21 + demo4/parent.py | 46 + history.html | 501 ++++++++ posix_ipc_module.c | 2612 +++++++++++++++++++++++++++++++++++++++ prober.py | 427 +++++++ prober/sniff_mq_existence.c | 8 + prober/sniff_mq_prio_max.c | 9 + prober/sniff_page_size.c | 19 + prober/sniff_realtime_lib.c | 11 + prober/sniff_sem_getvalue.c | 8 + prober/sniff_sem_timedwait.c | 8 + prober/sniff_sem_value_max.c | 7 + setup.py | 69 ++ 48 files changed, 6557 insertions(+) create mode 100644 INSTALL create mode 100644 LICENSE create mode 100644 PKG-INFO create mode 100644 README create mode 100644 ReadMe.html create mode 100644 VERSION create mode 100644 demo/ReadMe.txt create mode 100644 demo/SampleIpcConversation.png create mode 100755 demo/cleanup.py create mode 100644 demo/conclusion.c create mode 100644 demo/conclusion.py create mode 100755 demo/make_all.sh create mode 100644 demo/md5.c create mode 100644 demo/md5.h create mode 100644 demo/params.txt create mode 100644 demo/premise.c create mode 100644 demo/premise.py create mode 100644 demo/utils.c create mode 100644 demo/utils.h create mode 100644 demo/utils.py create mode 100644 demo2/ReadMe.txt create mode 100644 demo2/SampleIpcConversation.png create mode 100755 demo2/cleanup.py create mode 100644 demo2/conclusion.py create mode 100644 demo2/params.txt create mode 100644 demo2/premise.py create mode 100644 demo2/utils.py create mode 100644 demo3/ReadMe.txt create mode 100755 demo3/cleanup.py create mode 100644 demo3/one_shot_signal.py create mode 100644 demo3/one_shot_thread.py create mode 100644 demo3/repeating_signal.py create mode 100644 demo3/repeating_thread.py create mode 100644 demo3/utils.py create mode 100644 demo4/ReadMe.txt create mode 100644 demo4/child.py create mode 100644 demo4/parent.py create mode 100644 history.html create mode 100644 posix_ipc_module.c create mode 100644 prober.py create mode 100644 prober/sniff_mq_existence.c create mode 100644 prober/sniff_mq_prio_max.c create mode 100644 prober/sniff_page_size.c create mode 100644 prober/sniff_realtime_lib.c create mode 100644 prober/sniff_sem_getvalue.c create mode 100644 prober/sniff_sem_timedwait.c create mode 100644 prober/sniff_sem_value_max.c create mode 100644 setup.py diff --git a/INSTALL b/INSTALL new file mode 100644 index 0000000..b4f7c72 --- /dev/null +++ b/INSTALL @@ -0,0 +1,7 @@ +To install, just use the normal setup.py routine: + +python setup.py 
install + +If you get a permissions error, try this: + +sudo python setup.py install diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..e7f82a4 --- /dev/null +++ b/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) 2012, Philip Semanchuk +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of posix_ipc nor the names of its contributors may be + used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY ITS CONTRIBUTORS ''AS IS'' AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL Philip Semanchuk BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/PKG-INFO b/PKG-INFO new file mode 100644 index 0000000..8a799e6 --- /dev/null +++ b/PKG-INFO @@ -0,0 +1,44 @@ +Metadata-Version: 1.1 +Name: posix_ipc +Version: 0.9.8 +Summary: POSIX IPC primitives (semaphores, shared memory and message queues) for Python +Home-page: http://semanchuk.com/philip/posix_ipc/ +Author: Philip Semanchuk +Author-email: philip@semanchuk.com +License: http://creativecommons.org/licenses/BSD/ +Download-URL: http://semanchuk.com/philip/posix_ipc/posix_ipc-0.9.8.tar.gz +Description: posix_ipc is a Python module (written in C) that permits creation and + manipulation of POSIX inter-process semaphores, shared memory and message + queues on platforms supporting the POSIX Realtime Extensions a.k.a. POSIX + 1003.1b-1993. This includes nearly all Unices and Windows + Cygwin 1.7. + + posix_ipc is compatible with Python 2 and 3. + + The latest version, contact info, sample code, etc. are available on PyPI + and here: + http://semanchuk.com/philip/posix_ipc/ + + Installation is as simple as `python setup.py install`. Usage, a version + history, warnings, suggestions, etc. are covered in ReadMe.html. + + posix_ipc is free software (free as in speech and free as in beer) released + under a 3-clause BSD license. Complete licensing information is available in + the LICENSE file. 
+ + You might also be interested in the similar System V IPC module at: + http://semanchuk.com/philip/sysv_ipc/ +Keywords: ipc inter-process communication semaphore shared memory shm message queue +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: POSIX :: BSD :: FreeBSD +Classifier: Operating System :: POSIX :: Linux +Classifier: Operating System :: POSIX :: SunOS/Solaris +Classifier: Operating System :: POSIX +Classifier: Operating System :: Unix +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 3 +Classifier: Topic :: Utilities diff --git a/README b/README new file mode 100644 index 0000000..93b93de --- /dev/null +++ b/README @@ -0,0 +1,20 @@ +posix_ipc is a Python module (written in C) that permits creation and +manipulation of POSIX inter-process semaphores, shared memory and message +queues on platforms supporting the POSIX Realtime Extensions a.k.a. POSIX +1003.1b-1993. This includes nearly all Unices and Windows + Cygwin 1.7. + +posix_ipc is compatible with Python 2 and 3. + +The latest version, contact info, sample code, etc. are available on PyPI +and here: +http://semanchuk.com/philip/posix_ipc/ + +Installation is as simple as `python setup.py install`. Usage, a version +history, warnings, suggestions, etc. are covered in ReadMe.html. + +posix_ipc is free software (free as in speech and free as in beer) released +under a 3-clause BSD license. Complete licensing information is available in +the LICENSE file. + +You might also be interested in the similar System V IPC module at: +http://semanchuk.com/philip/sysv_ipc/ diff --git a/ReadMe.html b/ReadMe.html new file mode 100644 index 0000000..c274387 --- /dev/null +++ b/ReadMe.html @@ -0,0 +1,869 @@ + + + + + + + + + + POSIX IPC for Python + + + + + + +

POSIX IPC for Python - Semaphores, Shared Memory and Message Queues

+ +
+ +
+ +

The Python extension module posix_ipc gives access +to POSIX inter-process semaphores, shared memory and message queues on +systems that support the POSIX Realtime Extensions a.k.a. POSIX 1003.1b-1993. +That includes most (all?) Linuxes with kernel ≥ 2.6, OpenSolaris ≥ 2008.05 +and FreeBSD ≥ 7.2. +

+ +

OS X and other Unix-y platforms (including Windows + +Cygwin 1.7) provide partial +(or partially broken) support. See +the platform notes below for more details. +

+ +

This module is known to work with Python 2.4 – 3.3 (but not +3.0). It is released +under a BSD license. +

+ +

You might be interested in the very similar module +sysv_ipc which +provides Python access to IPC using System V semaphores, shared memory and +message queues. +System V IPC has broader OS support but is a little less easy to use and +usually lags behind this module a little. +

+ +

You can download +posix_ipc version 0.9.8 + +[MD5 sum] +[SHA1 sum] + +which contains the source code, setup.py, installation instructions and +sample code. The exact same +posix_ipc tarball is also available on PyPI. +

+ +

+You might want to read +all of the changes in this version and +about some known bugs. +

+ +

Note that this module doesn't support unnamed (anonymous) POSIX +semaphores. +

+ + + +

Module posix_ipc Documentation

+ +

Jump to semaphores, +shared memory, or +message queues.

+ +

Module Functions

+ +
+
unlink_semaphore(name)
+ unlink_shared_memory(name)
+ unlink_message_queue(name) +
+
Convenience functions that unlink the IPC object described + by name.
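For example, a small cleanup script (modeled on demo/cleanup.py) might remove leftover IPC objects by name. The names below are placeholders, and the sketch assumes message queues are supported on your platform:

    import posix_ipc

    # These names are examples; substitute whatever names your application created.
    for name, unlink in (("/my_semaphore", posix_ipc.unlink_semaphore),
                         ("/my_shared_memory", posix_ipc.unlink_shared_memory),
                         ("/my_message_queue", posix_ipc.unlink_message_queue)):
        try:
            unlink(name)
            print("removed %s" % name)
        except posix_ipc.ExistentialError:
            # Nothing with that name exists, so there's nothing to clean up.
            print("%s doesn't need cleanup" % name)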
+
+ + +

Module Constants

+ +
+
O_CREX, O_CREAT, O_EXCL and O_TRUNC
+
These flags are used when creating IPC objects. + All except O_CREX are bitwise unique and can be + ORed together. O_CREX is shorthand for + O_CREAT | O_EXCL. + +

O_TRUNC is only useful when + creating SharedMemory objects.

+
+ +
PAGE_SIZE
+
The operating system's memory page size, in bytes. It's probably a + good idea to make shared memory segments some multiple of this size. +
+ +
SEMAPHORE_TIMEOUT_SUPPORTED
+
True if the underlying OS supports sem_timedwait(). If False, all + timeouts > 0 passed to a semaphore's acquire() method are + treated as infinity. + +

As far as I know, this is only False under OS X.

+
+ +
SEMAPHORE_VALUE_SUPPORTED
+
True if the underlying OS supports sem_getvalue(). If False, + accessing the value attribute on a Semaphore instance + will raise an AttributeError. + +

As far as I know, this is only False under OS X.

+
+ +
SEMAPHORE_VALUE_MAX
+
The maximum value that can be assigned to a semaphore. +
+ +
MESSAGE_QUEUES_SUPPORTED
+
True if the underlying OS supports message queues, False otherwise. +
+ +
QUEUE_MESSAGES_MAX_DEFAULT
+
The default value for a message queue's max_messages + attribute. This can be quite small under Linux (e.g. 10) + but is usually LONG_MAX everywhere else. +
+ +
QUEUE_MESSAGE_SIZE_MAX_DEFAULT
+
The default value for a message queue's max_message_size + attribute. This is 8k (or possibly smaller under Linux). +
+ +
QUEUE_PRIORITY_MAX
+
The maximum message queue message priority. +
+ +
USER_SIGNAL_MIN, USER_SIGNAL_MAX
+
The constants define a range of signal values reserved for + use by user applications (like yours). +
+
+ +

Module Errors

+ +

In addition to standard Python errors (e.g. ValueError), +this module raises custom errors. These errors cover +situations specific to IPC. +

+ + +
+
Error
+
The base error class for all the custom errors in this module. +
+ +
SignalError
+
Raised when a waiting call (e.g. sem.acquire()) is + interrupted by a signal other than KeyboardInterrupt. +
+ +
PermissionsError
+
Indicates that you've attempted something that the permissions on the + IPC object don't allow. +
+ +
ExistentialError
+
Indicates an error related to the existence or non-existence of + an IPC object. +
+ +
BusyError
+
Raised when a call times out. +
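A rough sketch of how these errors are typically caught; the semaphore name here is just an example:

    import posix_ipc

    try:
        # Try to open an existing semaphore...
        sem = posix_ipc.Semaphore("/my_sem")
    except posix_ipc.ExistentialError:
        # ...and create it if it doesn't exist yet.
        sem = posix_ipc.Semaphore("/my_sem", posix_ipc.O_CREX, initial_value=1)

    try:
        # A timeout of 0 means "don't wait"; BusyError is raised if the
        # semaphore can't be acquired immediately.
        sem.acquire(timeout=0)
    except posix_ipc.BusyError:
        print("semaphore is busy")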
+
+ + +

The Semaphore Class

+ +

This is a handle to a semaphore.

+ +

Methods

+ +
+
Semaphore(name, [flags = 0, [mode = 0600, [initial_value = 0]]])
+
Creates a new semaphore or opens an existing one. + +

name must be None or + a string. If it is None, the module chooses a random + unused name. If it is a string, it + should begin with a slash and be valid according + to pathname rules on your system, e.g. + /wuthering_heights_by_semaphore +

+ +

The flags specify whether you want to create a + new semaphore or open an existing one. +

+ +
    +
  • With flags set to the default of 0, the module attempts + to open an existing semaphore and raises an error if that semaphore + doesn't exist. +
  • + +
  • With flags set to O_CREAT, + the module opens the semaphore if it exists (in which case mode and + initial value are ignored) or creates it if it doesn't. +
  • + +
  • With flags set to O_CREAT | O_EXCL + (or O_CREX), + the module creates a new semaphore identified by + name. If a + semaphore with that name already exists, the call raises + an ExistentialError. +
  • +
+
+ + +
acquire([timeout=None])
+
Waits (conditionally) until the semaphore's value is > 0 and then returns, + decrementing the semaphore. + +

The timeout (which can be a float) specifies how + many seconds this call should wait, if at all. +

+ +
    +
  • A timeout of None (the default) + implies no time limit. The call will not return until its wait + condition is satisfied. +
  • + +
  • When timeout is 0, the call + immediately raises a BusyError + if asked to wait. Since it will return immediately if not + asked to wait, this can be thought of as "non-blocking" mode. +
  • + +
  • When the timeout is > 0, the call + will wait no longer than timeout + seconds before either returning (having acquired the semaphore) + or raising a BusyError. + +

    On platforms that don't support the sem_timedwait() API, + a timeout > 0 is treated as + infinite. The call will not return until its wait + condition is satisfied. +

    + +

    Most platforms provide sem_timedwait(). OS X is a + notable exception. The module's Boolean constant + SEMAPHORE_TIMEOUT_SUPPORTED + is True on platforms that support sem_timedwait(). +

    +
  • +
+
+ +
release()
+
+ Releases (increments) the semaphore. +
+ +
close()
+
+ Closes the semaphore, indicating that the current process is + done with the semaphore. The effect of subsequent use of the semaphore + by the current process is undefined. Assuming it still exists, + (see unlink(), below) the semaphore can be re-opened. + +

You must call close() explicitly; it is + not called automatically + when a Semaphore object is garbage collected. +

+
+ + +
unlink()
+
+ Destroys the semaphore, with a caveat. If any processes have the semaphore + open when unlink is called, the call to unlink returns immediately + but destruction of the semaphore is postponed until all processes + have closed the semaphore. + +

Note, however, that once a semaphore has been unlinked, + calls to open() with the same name should + refer to a new semaphore. Sound confusing? It is, and you'd + probably be wise to structure your code so as to avoid + this situation. +

+
+
+ +

Attributes

+ +
+
name (read-only)
+
The name provided in the constructor.
+ +
value (read-only)
+
The integer value of the semaphore. Not available on OS X. + (See Platforms) +
+
+ +

Context Manager Support

+ +

These semaphores provide __enter__() and __exit__() +methods so they can be used in context managers. For instance -- +

+ +
+with posix_ipc.Semaphore(name) as sem:
+    # Do something...
+
+ +

Entering the context acquires the semaphore, exiting the context releases + the semaphore. See demo4/child.py for a complete example. +
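Putting the pieces together, a minimal sketch of a semaphore's life cycle might look like this. The name and timeout are arbitrary, and the timed acquire assumes SEMAPHORE_TIMEOUT_SUPPORTED is True on your platform:

    import posix_ipc

    sem = posix_ipc.Semaphore("/wuthering_heights", posix_ipc.O_CREX, initial_value=1)

    try:
        # Wait up to 5 seconds; BusyError is raised if the wait times out.
        sem.acquire(timeout=5)
        try:
            pass  # ...do the work that needs the semaphore held...
        finally:
            sem.release()
    except posix_ipc.BusyError:
        print("couldn't acquire the semaphore within 5 seconds")

    sem.close()    # close() is never called automatically
    sem.unlink()   # remove the semaphore from the system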

+ + +

The SharedMemory Class

+ +

This is a handle to a shared memory segment. POSIX shared memory segments +masquerade as files, and so the handle to a shared memory segment is just +a file descriptor that can be mmapped. +

+ +

Methods

+ +
+
SharedMemory(name, [flags = 0, [mode = 0600, [size = 0, [read_only = false]]]])
+
Creates a new shared memory segment or opens an existing one. + +

name must be None or + a string. If it is None, the module chooses a random + unused name. If it is a string, it + should begin with a slash and be valid according + to pathname rules on your system, e.g. + /four_yorkshiremen_sharing_memories +

+ +

On some systems you need to have write access to the path.

+ +

The flags specify whether you want to create a + new shared memory segment or open an existing one. +

+ +
    +
  • With flags set to the default of 0, the module attempts + to open an existing segment and raises an error if that segment + doesn't exist. +
  • + +
  • With flags set to O_CREAT, + the module opens the segment if it exists (in which case + size and mode + are ignored) or creates it if it doesn't. +
  • + +
  • With flags set to O_CREAT | O_EXCL + (or O_CREX), + the module creates a new shared memory segment identified by + name. If a + segment with that name already exists, the call raises + an ExistentialError. +
  • +
+ +

When opening an existing shared memory segment, one can also specify + the flag O_TRUNC + to truncate the shared memory to zero bytes. +

+
+ + +
close_fd()
+
+ Closes the file descriptor associated with this SharedMemory + object. Calling close_fd() is the same as calling + os.close() + on a SharedMemory object's fd attribute. + +

You must call close_fd() or os.close() + explicitly; the file descriptor is not closed + automatically when a SharedMemory object is garbage collected. +

+ +

Closing the file descriptor has no effect on any mmap + objects that were created from it. See the demo for an + example. +

+
+ + +
unlink()
+
+ Marks the shared memory for destruction once all processes have unmapped it. + +

+ The + POSIX specification for shm_unlink() says, "Even if the object + continues to exist after the last shm_unlink(), reuse of the name shall subsequently + cause shm_open() to behave as if no shared memory object of this name exists + (that is, shm_open() will fail if O_CREAT is not set, or will create a new shared + memory object if O_CREAT is set)." +

+ +

I'll bet a virtual cup of coffee that this tricky part of the + standard is not well or consistently implemented in every OS. Caveat emptor. +
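A sketch of the usual create/mmap/clean-up sequence, loosely based on the demo code. The name is arbitrary and the segment size simply reuses PAGE_SIZE:

    import mmap
    import posix_ipc

    memory = posix_ipc.SharedMemory("/four_yorkshiremen", posix_ipc.O_CREX,
                                    size=posix_ipc.PAGE_SIZE)

    # Map the segment into this process's address space.
    mapfile = mmap.mmap(memory.fd, memory.size)

    # Once mmapped, the file descriptor can be closed without affecting the map.
    memory.close_fd()

    mapfile.seek(0)
    mapfile.write(b"hello")        # write bytes, not str, under Python 3

    mapfile.close()
    memory.unlink()                # mark the segment for destruction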

+
+
+ +

Attributes

+ +
+
name (read-only)
+
The name provided in the constructor.
+
fd (read-only)
+
The file descriptor that represents the memory segment.
+
size (read-only)
+
The size (in bytes) of the shared memory segment.
+
+ +

The MessageQueue Class

+ +

This is a handle to a message queue.

+ +

Methods

+ +
+
MessageQueue(name, [flags = 0, [mode = 0600, [max_messages = QUEUE_MESSAGES_MAX_DEFAULT, [max_message_size = QUEUE_MESSAGE_SIZE_MAX_DEFAULT, [read = True, [write = True]]]]]])
+
Creates a new message queue or opens an existing one. + +

name must be None or + a string. If it is None, the module chooses a random + unused name. If it is a string, it + should begin with a slash and be valid according + to pathname rules on your system, e.g. + /my_message_queue +

+ +

On some systems you need to have write access to the path.

+ +

The flags specify whether you want to + create a new queue or open an existing one. +

+ +
    +
  • With flags set to the default of + 0, the module attempts + to open an existing queue and raises an error if that queue + doesn't exist. +
  • + +
  • With flags set to O_CREAT, + the module opens the queue if it exists (in which case + mode, max_messages and max_message_size + are ignored) or creates it if it doesn't. +
  • + +
  • With flags set to O_CREAT | O_EXCL + (or O_CREX), + the module creates a new message queue identified by + name. If a + queue with that name already exists, the call raises + an ExistentialError. +
  • +
+ +

Max_messages defines how many messages + can be in the queue at one time. When the queue is full, + calls to .send() will wait. +

+ +

Max_message_size defines the maximum + size (in bytes) of a message. +

+ +

Read and + write + default to True. If read/write + is False, calling .receive()/.send() on this object + is not permitted. + This doesn't affect other handles to the same queue. +

+
+ +
send(message, [timeout = None, [priority = 0]])
+
+ Sends a message via the queue. + +

The message string can contain embedded + NULLs (ASCII 0x00). Under Python 3, the message can + also be a bytes object. +

+ +

The timeout (which can be a float) + specifies how many seconds this call should wait if the + queue is full. Timeouts are irrelevant when the block + flag is False. +

+ +
    +
  • A timeout of None (the default) + implies no time limit. The call will not return until the + message is sent. +
  • + +
  • When timeout is 0, the call + immediately raises a BusyError + if asked to wait. +
  • + +
  • When the timeout is > 0, the call + will wait no longer than timeout + seconds before either returning (having sent the message) + or raising a BusyError. +
  • +
+ +

The priority allows you to order + messages in the queue. The highest priority message is received + first. By default, messages are sent at the lowest priority (0). +

+
+ +
receive([timeout = None])
+
+ Receives a message from the queue, returning a tuple of + (message, priority). Messages are received in the order of + highest priority to lowest, and in FIFO order for messages of + equal priority. + + Under Python 3, the returned message is a bytes object. + +

If the queue is empty, the call will not return immediately. + The timeout parameter controls the + wait just as for the function send(). +

+
+ +
request_notification([notification = None])
+
Depending on the parameter, requests or cancels notification from the + operating system when the queue changes from empty to non-empty. + +
    +
  • When notification is None + (the default), any existing notification request is + cancelled. +
  • + +
  • When notification is an + integer, notification will be sent as a signal of this + value that can be caught using a signal handler installed + with signal.signal(). +
  • + +
  • When notification is a tuple + of (function, param), notification will be sent + by invoking function(param) in a new + thread. +
  • +
+ +

Message queues accept only one notification request at a time. + If another process has already requested notifications from + this queue, this call will fail with a BusyError. +

+ +

The operating system delivers (at most) one notification + per request. If you want subsequent notifications, you must + request them by calling + request_notification() again. +
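A rough sketch of thread-based notification along the lines of demo3. The queue name is an example, and the callback re-requests notification because each request is good for only one delivery:

    import posix_ipc

    mq = posix_ipc.MessageQueue("/my_message_queue", posix_ipc.O_CREX)

    def handle_notification(queue):
        message, priority = queue.receive()
        print("got message: %r" % message)
        # Notifications are one-shot, so renew the request before returning.
        queue.request_notification((handle_notification, queue))

    # Ask for handle_notification(mq) to be called in a new thread when the
    # queue goes from empty to non-empty.
    mq.request_notification((handle_notification, mq))

    mq.send("hello")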

+
+ +
close()
+
+ Closes this reference to the queue. + +

You must call close() explicitly; it is + not called automatically + when a MessageQueue object is garbage collected. +

+
+ +
unlink()
+
+ Requests destruction of the queue. Although the call returns + immediately, actual destruction of the queue is postponed until all + references to it are closed. +
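A minimal sketch of sending and receiving through a queue. The queue name is an example; under Python 3 the received message comes back as a bytes object:

    import posix_ipc

    mq = posix_ipc.MessageQueue("/my_message_queue", posix_ipc.O_CREX)

    mq.send("ordinary message")              # priority defaults to 0
    mq.send("urgent message", priority=9)    # higher priorities are received first

    message, priority = mq.receive()         # the urgent message, priority 9
    message, priority = mq.receive()         # the ordinary message, priority 0

    mq.close()
    mq.unlink()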
+
+ +

Attributes

+ +
+
name (read-only)
+
The name provided in the constructor.
+
mqd (read-only)
+
The message queue descriptor that represents the queue.
+
block
+
When True (the default), calls to .send() and + .receive() may wait (block) if they cannot immediately + satisfy the send/receive request. When block is False, + the module will raise BusyError + instead of waiting. +
+
max_messages (read-only)
+
The maximum number of messages the queue can hold.
+
max_message_size (read-only)
+
The maximum message size (in bytes).
+
current_messages (read-only)
+
The number of messages currently in the queue.
+
+ + +

Usage Tips

+ +

Sample Code

+ +

This module comes with four demonstrations. The first (in the + directory demo) shows how to use shared memory and semaphores. + The second (in the directory demo2) shows how to use + message queues. The third (demo3) shows how to use message queue + notifications. The fourth (demo4) shows how to use a semaphore as a context manager (see demo4/child.py). +

+ +

Nobody Likes a Mr. Messy

+ +

IPC objects are a little different from most Python objects + and therefore require a little more care on the part of the programmer. When a + program creates an IPC object, it creates something that + resides outside of its own process, just like a file on a hard drive. It + won't go away when your process ends unless you explicitly remove it. And since many + operating systems don't even give you a way to enumerate existing POSIX IPC + entities, it might be hard to figure out what you're leaving behind. +

+ +

In short, remember to clean up after yourself.

+ +

Semaphores and References

+ +

I know it's verboten to talk about pointers in Python, but I'm +going to do it anyway. +

+ +

Each Semaphore object created by this module contains a C pointer to + the IPC object created by the system. When you call sem.close(), + the object's internal pointer is set to NULL. This leaves the + object in a not-quite-useless state. You can still call sem.unlink() + or print sem.name, but calls to sem.acquire() or + sem.release() will raise an ExistentialError. +

+ +

If you know you're not going to use a Semaphore object after calling + sem.close() or sem.unlink(), you could set your + semaphore variable to the return value of the function (which is always + None) like so: +

+ +
+    my_sem = my_sem.close()
+
+ +

That will ensure you don't have any nearly useless objects lying around + that you might use by accident. +

+ +

This doesn't apply to shared memory and message queues because they're +referenced at the C level by a file descriptor rather than a pointer. +

+ +

Permissions

+ +

It appears that the read and write mode bits on IPC objects are +ignored by the operating system. For instance, on OS X, OpenSolaris and +Linux one can write to semaphores and message queues with a mode of +0400. +

+ + +

Message Queues

+ +

When creating a new message queue, you specify a maximum message size +which defaults to QUEUE_MESSAGE_SIZE_MAX_DEFAULT (currently 8192 +bytes). You can create a queue with a larger value, but be aware that +posix_ipc allocates a buffer the size of the maximum message size +every time receive() is called. +
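If your messages are small, you can reduce that overhead by passing a smaller maximum when you create the queue; for example (the name and size here are arbitrary):

    import posix_ipc

    mq = posix_ipc.MessageQueue("/small_messages", posix_ipc.O_CREX,
                                max_message_size=256)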

+ +

Resizing Shared Memory Segments

+ +

+Under OS X/Darwin, ftruncate() can be used to set the memory size once +after the initial call to shm_open(). This module does that in the +SharedMemory constructor, so subsequent attempts to resize the shared memory +will fail. +

+ +

I don't know if this holds true on all platforms. If your platform supports multiple +calls to ftruncate(), you can call that via Python's os module, +passing the file descriptor exposed in the SharedMemory object. +
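On such a platform the call might look like the following untested sketch (the name and sizes are arbitrary; whether the second resize succeeds depends on your OS):

    import os
    import posix_ipc

    shm = posix_ipc.SharedMemory("/my_segment", posix_ipc.O_CREX, size=4096)

    # Attempt to grow the segment. Under OS X this second resize will fail.
    os.ftruncate(shm.fd, 8192)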

+ +

Consult Your Local man Pages

+ +

The posix_ipc module is just a wrapper around your system's API. If your + system's implementation has quirks, the man pages for + sem_open, sem_post, + sem_wait, sem_close, sem_unlink, shm_open, shm_unlink, mq_open, mq_send, + mq_receive, mq_getattr, mq_close, mq_unlink and mq_notify will + probably cover them. +

+ +

Last But Not Least

+ +

For Pythonistas –

+ + + +

Known Bugs

+ +

I don't know of any bugs in this code. However, under Python 3 the +standard library modules accept bytes and bytearray objects for filenames in +addition to strings. One could argue that this module should behave the +same way. +

+ +

Also, this module doesn't support Python 3 memory views, which it +probably should (for shared memory objects). Support for that might come in +a later version. +

+ + +

Platform Notes

+ +

This module is just a wrapper around the operating system's functions, +so if the operating system doesn't provide a function, this module can't +either. The POSIX Realtime Extensions (POSIX 1003.1b-1993) are, as the name +implies, an extension to POSIX and so a platform can claim "POSIX +conformance" and still not support any or all of the IPC functions. +

+ +
+
Linux with kernel ≥ 2.6
+
All features supported.
+ +
OpenSolaris ≥ 2008.05
+
All features supported.
+ +
FreeBSD ≥ 7.2
+
All features supported. + +

Under 7.2, posix_ipc's demos fail unless they're run as + root. It's a simple permissions problem. Prefix the IPC object + names with /tmp in params.txt and the problem + goes away. I didn't see this behavior under FreeBSD 8.0, so it + must have been fixed at some point. +

+ +

If you don't have the sem and mqueuefs kernel + modules loaded, you'll get a message like this (or something + similarly discouraging) when you + try to create a semaphore or message queue:
+ Bad system call: 12 (core dumped) +

+ +

Type kldstat to list loaded modules, and + kldload sem or kldload mqueuefs if you need + to load either of these. BTW, + mqueuefs has + some cool features. +

+ +

+ Prior to 7.2, FreeBSD POSIX semaphore support was + broken. +

+
+ +
OS X (up to and including 10.8)
+
+ Message queues are not supported by OS X. Also, + sem_getvalue() and sem_timedwait() are not + supported. + +

From what I can tell, OS X does not support sem_init() or + sem_destroy(), so even if this module adds support for unnamed + semaphores, they won't be available under OS X. +

+ +
Windows + Cygwin 1.7
+ +
Cygwin is a Linux-like + environment for Windows. + +

Versions of Cygwin prior to 1.7 didn't support POSIX IPC. + Under Cygwin 1.7 beta 62 (released in early October 2009), + posix_ipc compiles and runs both demos. +

+
+
+ + + + diff --git a/VERSION b/VERSION new file mode 100644 index 0000000..b5d0ec5 --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +0.9.8 \ No newline at end of file diff --git a/demo/ReadMe.txt b/demo/ReadMe.txt new file mode 100644 index 0000000..56a35dd --- /dev/null +++ b/demo/ReadMe.txt @@ -0,0 +1,54 @@ +This demonstrates use of shared memory and semaphores via two applications +named after Mrs. Premise and Mrs. Conclusion of the Monty Python sketch. +http://www.youtube.com/watch?v=crIJvcWkVcs + +Like those two characters, these programs chat back and forth and the result +is a lot of nonsense. In this case, what the programs are saying isn't the +interesting part. What's interesting is how they're doing it. + +Mrs. Premise and Mrs. Conclusion (the programs, not the sketch characters) +communicate through POSIX shared memory with a semaphore to control access +to that memory. + +Mrs. Premise starts things off by creating the shared memory and semaphore +and writing a random string (the current time) to the memory. She then sits +in a loop reading the memory. If it holds the same message she wrote, she +does nothing. If it is a new message, it must be from Mrs. Conclusion. + +Meanwhile, Mrs. Conclusion is doing exactly the same thing, except that she +assumes Mrs. Premise will write the first message. + +When either of these programs reads a new message, they write back an md5 +hash of that message. This serves two purposes. First, it ensures that +subsequent messages are very different so that if a message somehow gets +corrupted (say by being partially overwritten by the next message), it will +not escape notice. Second, it ensures that corruption can be detected if +it happens, because Mrs. Premise and Mrs. Conclusion can calculate what the +other's response to their message should be. + +Since they use a semaphore to control access to the shared memory, Mrs. +Premise and Mrs. Conclusion won't ever find their messages corrupted no +matter how many messages they exchange. You can experiment with this by +setting ITERATIONS in params.txt to a very large value. You can change +LIVE_DANGEROUSLY (also in params.txt) to a non-zero value to tell Mrs. +Premise and Mrs. Conclusion to run without using the semaphore. The shared +memory will probably get corrupted in fewer than 1000 iterations. + +To run the demo, start Mrs. Premise first in one window and then run +Mrs. Conclusion in another. + + + The Fancy Version + ================= + +If you want to get fancy, you can play with C versions of Mrs. Premise and +Mrs. Conclusion. The script make_all.sh will compile them for you. (Linux +users will need to edit the script and uncomment the line for the +Linux-specific linker option.) + +The resulting executables are called premise and conclusion and work exactly +the same as their Python counterparts. You can have the two C programs talk +to one another, or you can have premise.py talk to the C version of +conclusion...the possibilities are endless. (Actually, there are only four +possible combinations but "endless" sounds better.) 
+ diff --git a/demo/SampleIpcConversation.png b/demo/SampleIpcConversation.png new file mode 100644 index 0000000..9d11e7a Binary files /dev/null and b/demo/SampleIpcConversation.png differ diff --git a/demo/cleanup.py b/demo/cleanup.py new file mode 100755 index 0000000..afe4178 --- /dev/null +++ b/demo/cleanup.py @@ -0,0 +1,22 @@ +import posix_ipc +import utils + +params = utils.read_params() + +try: + posix_ipc.unlink_shared_memory(params["SHARED_MEMORY_NAME"]) + s = "memory segment %s removed" % params["SHARED_MEMORY_NAME"] + print (s) +except: + print ("memory doesn't need cleanup") + + +try: + posix_ipc.unlink_semaphore(params["SEMAPHORE_NAME"]) + s = "semaphore %s removed" % params["SEMAPHORE_NAME"] + print (s) +except: + print ("semaphore doesn't need cleanup") + + +print ("\nAll clean!") diff --git a/demo/conclusion.c b/demo/conclusion.c new file mode 100644 index 0000000..3807efd --- /dev/null +++ b/demo/conclusion.c @@ -0,0 +1,146 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "md5.h" +#include "utils.h" + +static const char MY_NAME[] = "Mrs. Conclusion"; + +// Set up a Mrs. Premise & Mrs. Conclusion conversation. + +int main() { + sem_t *the_semaphore = NULL; + int rc; + char s[1024]; + int i; + int done; + int fd; + void *pSharedMemory = NULL; + char last_message_i_wrote[256]; + char md5ified_message[256]; + struct param_struct params; + + say(MY_NAME, "Oooo 'ello, I'm Mrs. Conclusion!"); + + read_params(¶ms); + + // Mrs. Premise has already created the semaphore and shared memory. + // I just need to get handles to them. + the_semaphore = sem_open(params.semaphore_name, 0); + + if (the_semaphore == SEM_FAILED) { + the_semaphore = NULL; + sprintf(s, "Getting a handle to the semaphore failed; errno is %d", errno); + say(MY_NAME, s); + } + else { + // get a handle to the shared memory + fd = shm_open(params.shared_memory_name, O_RDWR, params.permissions); + + if (fd == -1) { + sprintf(s, "Couldn't get a handle to the shared memory; errno is %d", errno); + say(MY_NAME, s); + } + else { + // mmap it. + pSharedMemory = mmap((void *)0, (size_t)params.size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + if (pSharedMemory == MAP_FAILED) { + sprintf(s, "MMapping the shared memory failed; errno is %d", errno); + say(MY_NAME, s); + } + else { + sprintf(s, "pSharedMemory = %p", pSharedMemory); + say(MY_NAME, s); + + i = 0; + done = 0; + last_message_i_wrote[0] = '\0'; + while (!done) { + sprintf(s, "iteration %d", i); + say(MY_NAME, s); + + // Wait for Mrs. Premise to free up the semaphore. + rc = acquire_semaphore(MY_NAME, the_semaphore, params.live_dangerously); + if (rc) + done = 1; + else { + while ( (!rc) && \ + (!strcmp((char *)pSharedMemory, last_message_i_wrote)) + ) { + // Nothing new; give Mrs. Premise another change to respond. + sprintf(s, "Read %zu characters '%s'", strlen((char *)pSharedMemory), (char *)pSharedMemory); + say(MY_NAME, s); + say(MY_NAME, "Releasing the semaphore"); + rc = release_semaphore(MY_NAME, the_semaphore, params.live_dangerously); + if (!rc) { + say(MY_NAME, "Waiting to acquire the semaphore"); + rc = acquire_semaphore(MY_NAME, the_semaphore, params.live_dangerously); + } + } + + md5ify(last_message_i_wrote, md5ified_message); + + // I always accept the first message (when i == 0) + if ( (i == 0) || (!strcmp(md5ified_message, (char *)pSharedMemory)) ) { + // All is well + i++; + + if (i == params.iterations) + done = 1; + + // MD5 the reply and write back to Mrs. Premise. 
+ md5ify((char *)pSharedMemory, md5ified_message); + + // Write back to Mrs. Premise. + sprintf(s, "Writing %zu characters '%s'", strlen(md5ified_message), md5ified_message); + say(MY_NAME, s); + + strcpy((char *)pSharedMemory, md5ified_message); + + strcpy(last_message_i_wrote, md5ified_message); + } + else { + sprintf(s, "Shared memory corruption after %d iterations.", i); + say(MY_NAME, s); + sprintf(s, "Mismatch; rc = %d, new message is '%s', expected '%s'.", rc, (char *)pSharedMemory, md5ified_message); + say(MY_NAME, s); + done = 1; + } + } + + // Release the semaphore. + rc = release_semaphore(MY_NAME, the_semaphore, params.live_dangerously); + if (rc) + done = 1; + } + } + // Un-mmap the memory + rc = munmap(pSharedMemory, (size_t)params.size); + if (rc) { + sprintf(s, "Unmapping the memory failed; errno is %d", errno); + say(MY_NAME, s); + } + + // Close the shared memory segment's file descriptor + if (-1 == close(fd)) { + sprintf(s, "Closing memory's file descriptor failed; errno is %d", errno); + say(MY_NAME, s); + } + } + rc = sem_close(the_semaphore); + if (rc) { + sprintf(s, "Closing the semaphore failed; errno is %d", errno); + say(MY_NAME, s); + } + } + + + return 0; +} diff --git a/demo/conclusion.py b/demo/conclusion.py new file mode 100644 index 0000000..7ecd05b --- /dev/null +++ b/demo/conclusion.py @@ -0,0 +1,86 @@ +# Python modules +import time +import mmap +import os +import sys +PY_MAJOR_VERSION = sys.version_info[0] +# hashlib is only available in Python >= 2.5. I still want to support +# older Pythons so I import md5 if hashlib is not available. Fortunately +# md5 can masquerade as hashlib for my purposes. +try: + import hashlib +except ImportError: + import md5 as hashlib + +# 3rd party modules +import posix_ipc + +# Utils for this demo +import utils + + +PY_MAJOR_VERSION = sys.version_info[0] + +utils.say("Oooo 'ello, I'm Mrs. Conclusion!") + +params = utils.read_params() + +# Mrs. Premise has already created the semaphore and shared memory. +# I just need to get handles to them. +memory = posix_ipc.SharedMemory(params["SHARED_MEMORY_NAME"]) +semaphore = posix_ipc.Semaphore(params["SEMAPHORE_NAME"]) + +# MMap the shared memory +mapfile = mmap.mmap(memory.fd, memory.size) + +# Once I've mmapped the file descriptor, I can close it without +# interfering with the mmap. This also demonstrates that os.close() is a +# perfectly legitimate alternative to the SharedMemory's close_fd() method. +os.close(memory.fd) + +what_i_wrote = "" + +for i in range(0, params["ITERATIONS"]): + utils.say("iteration %d" % i) + if not params["LIVE_DANGEROUSLY"]: + # Wait for Mrs. Premise to free up the semaphore. + utils.say("Waiting to acquire the semaphore") + semaphore.acquire() + + s = utils.read_from_memory(mapfile) + + while s == what_i_wrote: + if not params["LIVE_DANGEROUSLY"]: + # Release the semaphore... + utils.say("Releasing the semaphore") + semaphore.release() + # ...and wait for it to become available again. + utils.say("Waiting to acquire the semaphore") + semaphore.acquire() + + s = utils.read_from_memory(mapfile) + + if what_i_wrote: + if PY_MAJOR_VERSION > 2: + what_i_wrote = what_i_wrote.encode() + try: + assert(s == hashlib.md5(what_i_wrote).hexdigest()) + except AssertionError: + utils.raise_error(AssertionError, + "Shared memory corruption after %d iterations." 
% i) + + if PY_MAJOR_VERSION > 2: + s = s.encode() + what_i_wrote = hashlib.md5(s).hexdigest() + + utils.write_to_memory(mapfile, what_i_wrote) + + if not params["LIVE_DANGEROUSLY"]: + utils.say("Releasing the semaphore") + semaphore.release() + +semaphore.close() +mapfile.close() + +utils.say("") +utils.say("%d iterations complete" % (i + 1)) diff --git a/demo/make_all.sh b/demo/make_all.sh new file mode 100755 index 0000000..baa8722 --- /dev/null +++ b/demo/make_all.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +# Linker opts is blank for OS X, FreeBSD and OpenSolaris +#LINKER_OPTIONS="" + +# Must link with realtime libs for Linux +LINKER_OPTIONS="-lrt" + +gcc -Wall -c -o md5.o md5.c +gcc -Wall -c -o utils.o utils.c +gcc -Wall -L. $LINKER_OPTIONS md5.o utils.o -o premise premise.c +gcc -Wall -L. $LINKER_OPTIONS md5.o utils.o -o conclusion conclusion.c diff --git a/demo/md5.c b/demo/md5.c new file mode 100644 index 0000000..c35d96c --- /dev/null +++ b/demo/md5.c @@ -0,0 +1,381 @@ +/* + Copyright (C) 1999, 2000, 2002 Aladdin Enterprises. All rights reserved. + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + + L. Peter Deutsch + ghost@aladdin.com + + */ +/* $Id: md5.c,v 1.6 2002/04/13 19:20:28 lpd Exp $ */ +/* + Independent implementation of MD5 (RFC 1321). + + This code implements the MD5 Algorithm defined in RFC 1321, whose + text is available at + http://www.ietf.org/rfc/rfc1321.txt + The code is derived from the text of the RFC, including the test suite + (section A.5) but excluding the rest of Appendix A. It does not include + any code or documentation that is identified in the RFC as being + copyrighted. + + The original and principal author of md5.c is L. Peter Deutsch + . Other authors are noted in the change history + that follows (in reverse chronological order): + + 2002-04-13 lpd Clarified derivation from RFC 1321; now handles byte order + either statically or dynamically; added missing #include + in library. + 2002-03-11 lpd Corrected argument list for main(), and added int return + type, in test program and T value program. + 2002-02-21 lpd Added missing #include in test program. + 2000-07-03 lpd Patched to eliminate warnings about "constant is + unsigned in ANSI C, signed in traditional"; made test program + self-checking. + 1999-11-04 lpd Edited comments slightly for automatic TOC extraction. + 1999-10-18 lpd Fixed typo in header comment (ansi2knr rather than md5). + 1999-05-03 lpd Original version. + */ + +#include "md5.h" +#include + +#undef BYTE_ORDER /* 1 = big-endian, -1 = little-endian, 0 = unknown */ +#ifdef ARCH_IS_BIG_ENDIAN +# define BYTE_ORDER (ARCH_IS_BIG_ENDIAN ? 
1 : -1) +#else +# define BYTE_ORDER 0 +#endif + +#define T_MASK ((md5_word_t)~0) +#define T1 /* 0xd76aa478 */ (T_MASK ^ 0x28955b87) +#define T2 /* 0xe8c7b756 */ (T_MASK ^ 0x173848a9) +#define T3 0x242070db +#define T4 /* 0xc1bdceee */ (T_MASK ^ 0x3e423111) +#define T5 /* 0xf57c0faf */ (T_MASK ^ 0x0a83f050) +#define T6 0x4787c62a +#define T7 /* 0xa8304613 */ (T_MASK ^ 0x57cfb9ec) +#define T8 /* 0xfd469501 */ (T_MASK ^ 0x02b96afe) +#define T9 0x698098d8 +#define T10 /* 0x8b44f7af */ (T_MASK ^ 0x74bb0850) +#define T11 /* 0xffff5bb1 */ (T_MASK ^ 0x0000a44e) +#define T12 /* 0x895cd7be */ (T_MASK ^ 0x76a32841) +#define T13 0x6b901122 +#define T14 /* 0xfd987193 */ (T_MASK ^ 0x02678e6c) +#define T15 /* 0xa679438e */ (T_MASK ^ 0x5986bc71) +#define T16 0x49b40821 +#define T17 /* 0xf61e2562 */ (T_MASK ^ 0x09e1da9d) +#define T18 /* 0xc040b340 */ (T_MASK ^ 0x3fbf4cbf) +#define T19 0x265e5a51 +#define T20 /* 0xe9b6c7aa */ (T_MASK ^ 0x16493855) +#define T21 /* 0xd62f105d */ (T_MASK ^ 0x29d0efa2) +#define T22 0x02441453 +#define T23 /* 0xd8a1e681 */ (T_MASK ^ 0x275e197e) +#define T24 /* 0xe7d3fbc8 */ (T_MASK ^ 0x182c0437) +#define T25 0x21e1cde6 +#define T26 /* 0xc33707d6 */ (T_MASK ^ 0x3cc8f829) +#define T27 /* 0xf4d50d87 */ (T_MASK ^ 0x0b2af278) +#define T28 0x455a14ed +#define T29 /* 0xa9e3e905 */ (T_MASK ^ 0x561c16fa) +#define T30 /* 0xfcefa3f8 */ (T_MASK ^ 0x03105c07) +#define T31 0x676f02d9 +#define T32 /* 0x8d2a4c8a */ (T_MASK ^ 0x72d5b375) +#define T33 /* 0xfffa3942 */ (T_MASK ^ 0x0005c6bd) +#define T34 /* 0x8771f681 */ (T_MASK ^ 0x788e097e) +#define T35 0x6d9d6122 +#define T36 /* 0xfde5380c */ (T_MASK ^ 0x021ac7f3) +#define T37 /* 0xa4beea44 */ (T_MASK ^ 0x5b4115bb) +#define T38 0x4bdecfa9 +#define T39 /* 0xf6bb4b60 */ (T_MASK ^ 0x0944b49f) +#define T40 /* 0xbebfbc70 */ (T_MASK ^ 0x4140438f) +#define T41 0x289b7ec6 +#define T42 /* 0xeaa127fa */ (T_MASK ^ 0x155ed805) +#define T43 /* 0xd4ef3085 */ (T_MASK ^ 0x2b10cf7a) +#define T44 0x04881d05 +#define T45 /* 0xd9d4d039 */ (T_MASK ^ 0x262b2fc6) +#define T46 /* 0xe6db99e5 */ (T_MASK ^ 0x1924661a) +#define T47 0x1fa27cf8 +#define T48 /* 0xc4ac5665 */ (T_MASK ^ 0x3b53a99a) +#define T49 /* 0xf4292244 */ (T_MASK ^ 0x0bd6ddbb) +#define T50 0x432aff97 +#define T51 /* 0xab9423a7 */ (T_MASK ^ 0x546bdc58) +#define T52 /* 0xfc93a039 */ (T_MASK ^ 0x036c5fc6) +#define T53 0x655b59c3 +#define T54 /* 0x8f0ccc92 */ (T_MASK ^ 0x70f3336d) +#define T55 /* 0xffeff47d */ (T_MASK ^ 0x00100b82) +#define T56 /* 0x85845dd1 */ (T_MASK ^ 0x7a7ba22e) +#define T57 0x6fa87e4f +#define T58 /* 0xfe2ce6e0 */ (T_MASK ^ 0x01d3191f) +#define T59 /* 0xa3014314 */ (T_MASK ^ 0x5cfebceb) +#define T60 0x4e0811a1 +#define T61 /* 0xf7537e82 */ (T_MASK ^ 0x08ac817d) +#define T62 /* 0xbd3af235 */ (T_MASK ^ 0x42c50dca) +#define T63 0x2ad7d2bb +#define T64 /* 0xeb86d391 */ (T_MASK ^ 0x14792c6e) + + +static void +md5_process(md5_state_t *pms, const md5_byte_t *data /*[64]*/) +{ + md5_word_t + a = pms->abcd[0], b = pms->abcd[1], + c = pms->abcd[2], d = pms->abcd[3]; + md5_word_t t; +#if BYTE_ORDER > 0 + /* Define storage only for big-endian CPUs. */ + md5_word_t X[16]; +#else + /* Define storage for little-endian or both types of CPUs. */ + md5_word_t xbuf[16]; + const md5_word_t *X; +#endif + + { +#if BYTE_ORDER == 0 + /* + * Determine dynamically whether this is a big-endian or + * little-endian machine, since we can use a more efficient + * algorithm on the latter. 
+ */ + static const int w = 1; + + if (*((const md5_byte_t *)&w)) /* dynamic little-endian */ +#endif +#if BYTE_ORDER <= 0 /* little-endian */ + { + /* + * On little-endian machines, we can process properly aligned + * data without copying it. + */ + if (!((data - (const md5_byte_t *)0) & 3)) { + /* data are properly aligned */ + X = (const md5_word_t *)data; + } else { + /* not aligned */ + memcpy(xbuf, data, 64); + X = xbuf; + } + } +#endif +#if BYTE_ORDER == 0 + else /* dynamic big-endian */ +#endif +#if BYTE_ORDER >= 0 /* big-endian */ + { + /* + * On big-endian machines, we must arrange the bytes in the + * right order. + */ + const md5_byte_t *xp = data; + int i; + +# if BYTE_ORDER == 0 + X = xbuf; /* (dynamic only) */ +# else +# define xbuf X /* (static only) */ +# endif + for (i = 0; i < 16; ++i, xp += 4) + xbuf[i] = xp[0] + (xp[1] << 8) + (xp[2] << 16) + (xp[3] << 24); + } +#endif + } + +#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32 - (n)))) + + /* Round 1. */ + /* Let [abcd k s i] denote the operation + a = b + ((a + F(b,c,d) + X[k] + T[i]) <<< s). */ +#define F(x, y, z) (((x) & (y)) | (~(x) & (z))) +#define SET(a, b, c, d, k, s, Ti)\ + t = a + F(b,c,d) + X[k] + Ti;\ + a = ROTATE_LEFT(t, s) + b + /* Do the following 16 operations. */ + SET(a, b, c, d, 0, 7, T1); + SET(d, a, b, c, 1, 12, T2); + SET(c, d, a, b, 2, 17, T3); + SET(b, c, d, a, 3, 22, T4); + SET(a, b, c, d, 4, 7, T5); + SET(d, a, b, c, 5, 12, T6); + SET(c, d, a, b, 6, 17, T7); + SET(b, c, d, a, 7, 22, T8); + SET(a, b, c, d, 8, 7, T9); + SET(d, a, b, c, 9, 12, T10); + SET(c, d, a, b, 10, 17, T11); + SET(b, c, d, a, 11, 22, T12); + SET(a, b, c, d, 12, 7, T13); + SET(d, a, b, c, 13, 12, T14); + SET(c, d, a, b, 14, 17, T15); + SET(b, c, d, a, 15, 22, T16); +#undef SET + + /* Round 2. */ + /* Let [abcd k s i] denote the operation + a = b + ((a + G(b,c,d) + X[k] + T[i]) <<< s). */ +#define G(x, y, z) (((x) & (z)) | ((y) & ~(z))) +#define SET(a, b, c, d, k, s, Ti)\ + t = a + G(b,c,d) + X[k] + Ti;\ + a = ROTATE_LEFT(t, s) + b + /* Do the following 16 operations. */ + SET(a, b, c, d, 1, 5, T17); + SET(d, a, b, c, 6, 9, T18); + SET(c, d, a, b, 11, 14, T19); + SET(b, c, d, a, 0, 20, T20); + SET(a, b, c, d, 5, 5, T21); + SET(d, a, b, c, 10, 9, T22); + SET(c, d, a, b, 15, 14, T23); + SET(b, c, d, a, 4, 20, T24); + SET(a, b, c, d, 9, 5, T25); + SET(d, a, b, c, 14, 9, T26); + SET(c, d, a, b, 3, 14, T27); + SET(b, c, d, a, 8, 20, T28); + SET(a, b, c, d, 13, 5, T29); + SET(d, a, b, c, 2, 9, T30); + SET(c, d, a, b, 7, 14, T31); + SET(b, c, d, a, 12, 20, T32); +#undef SET + + /* Round 3. */ + /* Let [abcd k s t] denote the operation + a = b + ((a + H(b,c,d) + X[k] + T[i]) <<< s). */ +#define H(x, y, z) ((x) ^ (y) ^ (z)) +#define SET(a, b, c, d, k, s, Ti)\ + t = a + H(b,c,d) + X[k] + Ti;\ + a = ROTATE_LEFT(t, s) + b + /* Do the following 16 operations. */ + SET(a, b, c, d, 5, 4, T33); + SET(d, a, b, c, 8, 11, T34); + SET(c, d, a, b, 11, 16, T35); + SET(b, c, d, a, 14, 23, T36); + SET(a, b, c, d, 1, 4, T37); + SET(d, a, b, c, 4, 11, T38); + SET(c, d, a, b, 7, 16, T39); + SET(b, c, d, a, 10, 23, T40); + SET(a, b, c, d, 13, 4, T41); + SET(d, a, b, c, 0, 11, T42); + SET(c, d, a, b, 3, 16, T43); + SET(b, c, d, a, 6, 23, T44); + SET(a, b, c, d, 9, 4, T45); + SET(d, a, b, c, 12, 11, T46); + SET(c, d, a, b, 15, 16, T47); + SET(b, c, d, a, 2, 23, T48); +#undef SET + + /* Round 4. */ + /* Let [abcd k s t] denote the operation + a = b + ((a + I(b,c,d) + X[k] + T[i]) <<< s). 
*/ +#define I(x, y, z) ((y) ^ ((x) | ~(z))) +#define SET(a, b, c, d, k, s, Ti)\ + t = a + I(b,c,d) + X[k] + Ti;\ + a = ROTATE_LEFT(t, s) + b + /* Do the following 16 operations. */ + SET(a, b, c, d, 0, 6, T49); + SET(d, a, b, c, 7, 10, T50); + SET(c, d, a, b, 14, 15, T51); + SET(b, c, d, a, 5, 21, T52); + SET(a, b, c, d, 12, 6, T53); + SET(d, a, b, c, 3, 10, T54); + SET(c, d, a, b, 10, 15, T55); + SET(b, c, d, a, 1, 21, T56); + SET(a, b, c, d, 8, 6, T57); + SET(d, a, b, c, 15, 10, T58); + SET(c, d, a, b, 6, 15, T59); + SET(b, c, d, a, 13, 21, T60); + SET(a, b, c, d, 4, 6, T61); + SET(d, a, b, c, 11, 10, T62); + SET(c, d, a, b, 2, 15, T63); + SET(b, c, d, a, 9, 21, T64); +#undef SET + + /* Then perform the following additions. (That is increment each + of the four registers by the value it had before this block + was started.) */ + pms->abcd[0] += a; + pms->abcd[1] += b; + pms->abcd[2] += c; + pms->abcd[3] += d; +} + +void +md5_init(md5_state_t *pms) +{ + pms->count[0] = pms->count[1] = 0; + pms->abcd[0] = 0x67452301; + pms->abcd[1] = /*0xefcdab89*/ T_MASK ^ 0x10325476; + pms->abcd[2] = /*0x98badcfe*/ T_MASK ^ 0x67452301; + pms->abcd[3] = 0x10325476; +} + +void +md5_append(md5_state_t *pms, const md5_byte_t *data, int nbytes) +{ + const md5_byte_t *p = data; + int left = nbytes; + int offset = (pms->count[0] >> 3) & 63; + md5_word_t nbits = (md5_word_t)(nbytes << 3); + + if (nbytes <= 0) + return; + + /* Update the message length. */ + pms->count[1] += nbytes >> 29; + pms->count[0] += nbits; + if (pms->count[0] < nbits) + pms->count[1]++; + + /* Process an initial partial block. */ + if (offset) { + int copy = (offset + nbytes > 64 ? 64 - offset : nbytes); + + memcpy(pms->buf + offset, p, copy); + if (offset + copy < 64) + return; + p += copy; + left -= copy; + md5_process(pms, pms->buf); + } + + /* Process full blocks. */ + for (; left >= 64; p += 64, left -= 64) + md5_process(pms, p); + + /* Process a final partial block. */ + if (left) + memcpy(pms->buf, p, left); +} + +void +md5_finish(md5_state_t *pms, md5_byte_t digest[16]) +{ + static const md5_byte_t pad[64] = { + 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }; + md5_byte_t data[8]; + int i; + + /* Save the length before padding. */ + for (i = 0; i < 8; ++i) + data[i] = (md5_byte_t)(pms->count[i >> 2] >> ((i & 3) << 3)); + /* Pad to 56 bytes mod 64. */ + md5_append(pms, pad, ((55 - (pms->count[0] >> 3)) & 63) + 1); + /* Append the length. */ + md5_append(pms, data, 8); + for (i = 0; i < 16; ++i) + digest[i] = (md5_byte_t)(pms->abcd[i >> 2] >> ((i & 3) << 3)); +} diff --git a/demo/md5.h b/demo/md5.h new file mode 100644 index 0000000..698c995 --- /dev/null +++ b/demo/md5.h @@ -0,0 +1,91 @@ +/* + Copyright (C) 1999, 2002 Aladdin Enterprises. All rights reserved. + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. 
If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + + L. Peter Deutsch + ghost@aladdin.com + + */ +/* $Id: md5.h,v 1.4 2002/04/13 19:20:28 lpd Exp $ */ +/* + Independent implementation of MD5 (RFC 1321). + + This code implements the MD5 Algorithm defined in RFC 1321, whose + text is available at + http://www.ietf.org/rfc/rfc1321.txt + The code is derived from the text of the RFC, including the test suite + (section A.5) but excluding the rest of Appendix A. It does not include + any code or documentation that is identified in the RFC as being + copyrighted. + + The original and principal author of md5.h is L. Peter Deutsch + . Other authors are noted in the change history + that follows (in reverse chronological order): + + 2002-04-13 lpd Removed support for non-ANSI compilers; removed + references to Ghostscript; clarified derivation from RFC 1321; + now handles byte order either statically or dynamically. + 1999-11-04 lpd Edited comments slightly for automatic TOC extraction. + 1999-10-18 lpd Fixed typo in header comment (ansi2knr rather than md5); + added conditionalization for C++ compilation from Martin + Purschke . + 1999-05-03 lpd Original version. + */ + +#ifndef md5_INCLUDED +# define md5_INCLUDED + +/* + * This package supports both compile-time and run-time determination of CPU + * byte order. If ARCH_IS_BIG_ENDIAN is defined as 0, the code will be + * compiled to run only on little-endian CPUs; if ARCH_IS_BIG_ENDIAN is + * defined as non-zero, the code will be compiled to run only on big-endian + * CPUs; if ARCH_IS_BIG_ENDIAN is not defined, the code will be compiled to + * run on either big- or little-endian CPUs, but will run slightly less + * efficiently on either one than if ARCH_IS_BIG_ENDIAN is defined. + */ + +typedef unsigned char md5_byte_t; /* 8-bit byte */ +typedef unsigned int md5_word_t; /* 32-bit word */ + +/* Define the state of the MD5 Algorithm. */ +typedef struct md5_state_s { + md5_word_t count[2]; /* message length in bits, lsw first */ + md5_word_t abcd[4]; /* digest buffer */ + md5_byte_t buf[64]; /* accumulate block */ +} md5_state_t; + +#ifdef __cplusplus +extern "C" +{ +#endif + +/* Initialize the algorithm. */ +void md5_init(md5_state_t *pms); + +/* Append a string to the message. */ +void md5_append(md5_state_t *pms, const md5_byte_t *data, int nbytes); + +/* Finish the message and return the digest. */ +void md5_finish(md5_state_t *pms, md5_byte_t digest[16]); + +#ifdef __cplusplus +} /* end extern "C" */ +#endif + +#endif /* md5_INCLUDED */ diff --git a/demo/params.txt b/demo/params.txt new file mode 100644 index 0000000..dc468c6 --- /dev/null +++ b/demo/params.txt @@ -0,0 +1,20 @@ +# These parameters control how Mrs. Premise and Mrs. Conclusion behave. + +# ITERATIONS is the number of times they'll talk to one another. +# LIVE_DANGEROUSLY is a Boolean (0 or 1); if set to 1 the programs +# won't use the semaphore to coordinate access to the shared +# memory. Corruption will likely result. +# SEMAPHORE_NAME is the name to be used for the semaphore. +# SHARED_MEMORY_NAME is the name to be used for the shared memory. +# PERMISSIONS are in octal (note the leading 0). +# SHM_SIZE is the size of the shared memory segment in bytes. 
+ +ITERATIONS=1000 +LIVE_DANGEROUSLY=0 +SEMAPHORE_NAME=/wuthering_heights +SHARED_MEMORY_NAME=/four_yorkshiremen +PERMISSIONS=0600 +SHM_SIZE=4096 + + + diff --git a/demo/premise.c b/demo/premise.c new file mode 100644 index 0000000..43d5207 --- /dev/null +++ b/demo/premise.c @@ -0,0 +1,223 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "md5.h" +#include "utils.h" + +const char MY_NAME[] = "Mrs. Premise"; + +// Set up a Mrs. Premise & Mrs. Conclusion conversation. + +void get_current_time(char *); + +int main() { + sem_t *pSemaphore = NULL; + int rc; + char s[1024]; + void *pSharedMemory = NULL; + char last_message_i_wrote[256]; + char md5ified_message[256]; + int i = 0; + int done = 0; + int fd; + struct param_struct params; + + say(MY_NAME, "Oooo 'ello, I'm Mrs. Premise!"); + + read_params(¶ms); + + // Create the shared memory + fd = shm_open(params.shared_memory_name, O_RDWR | O_CREAT | O_EXCL, params.permissions); + + if (fd == -1) { + fd = 0; + sprintf(s, "Creating the shared memory failed; errno is %d", errno); + say(MY_NAME, s); + } + else { + // The memory is created as a file that's 0 bytes long. Resize it. + rc = ftruncate(fd, params.size); + if (rc) { + sprintf(s, "Resizing the shared memory failed; errno is %d", errno); + say(MY_NAME, s); + } + else { + // MMap the shared memory + //void *mmap(void *start, size_t length, int prot, int flags, int fd, off_t offset); + pSharedMemory = mmap((void *)0, (size_t)params.size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + if (pSharedMemory == MAP_FAILED) { + pSharedMemory = NULL; + sprintf(s, "MMapping the shared memory failed; errno is %d", errno); + say(MY_NAME, s); + } + else { + sprintf(s, "pSharedMemory = %p", pSharedMemory); + say(MY_NAME, s); + } + } + } + + if (pSharedMemory) { + // Create the semaphore + pSemaphore = sem_open(params.semaphore_name, O_CREAT, params.permissions, 0); + + if (pSemaphore == SEM_FAILED) { + sprintf(s, "Creating the semaphore failed; errno is %d", errno); + say(MY_NAME, s); + } + else { + sprintf(s, "the semaphore is %p", (void *)pSemaphore); + say(MY_NAME, s); + + // I seed the shared memory with a random string (the current time). + get_current_time(s); + + strcpy((char *)pSharedMemory, s); + strcpy(last_message_i_wrote, s); + + sprintf(s, "Wrote %zu characters: %s", strlen(last_message_i_wrote), last_message_i_wrote); + say(MY_NAME, s); + + i = 0; + while (!done) { + sprintf(s, "iteration %d", i); + say(MY_NAME, s); + + // Release the semaphore... + rc = release_semaphore(MY_NAME, pSemaphore, params.live_dangerously); + // ...and wait for it to become available again. In real code + // I might want to sleep briefly before calling .acquire() in + // order to politely give other processes an opportunity to grab + // the semaphore while it is free so as to avoid starvation. But + // this code is meant to be a stress test that maximizes the + // opportunity for shared memory corruption and politeness is + // not helpful in stress tests. + if (!rc) + rc = acquire_semaphore(MY_NAME, pSemaphore, params.live_dangerously); + + if (rc) + done = 1; + else { + // I keep checking the shared memory until something new has + // been written. + while ( (!rc) && \ + (!strcmp((char *)pSharedMemory, last_message_i_wrote)) + ) { + // Nothing new; give Mrs. Conclusion another change to respond. 
+ sprintf(s, "Read %zu characters '%s'", strlen((char *)pSharedMemory), (char *)pSharedMemory); + say(MY_NAME, s); + rc = release_semaphore(MY_NAME, pSemaphore, params.live_dangerously); + if (!rc) { + rc = acquire_semaphore(MY_NAME, pSemaphore, params.live_dangerously); + } + } + + + if (rc) + done = 1; + else { + // What I read must be the md5 of what I wrote or something's + // gone wrong. + md5ify(last_message_i_wrote, md5ified_message); + + if (strcmp(md5ified_message, (char *)pSharedMemory) == 0) { + // Yes, the message is OK + i++; + if (i == params.iterations) + done = 1; + + // MD5 the reply and write back to Mrs. Conclusion. + md5ify(md5ified_message, md5ified_message); + + sprintf(s, "Writing %zu characters '%s'", strlen(md5ified_message), md5ified_message); + say(MY_NAME, s); + + strcpy((char *)pSharedMemory, md5ified_message); + strcpy((char *)last_message_i_wrote, md5ified_message); + } + else { + sprintf(s, "Shared memory corruption after %d iterations.", i); + say(MY_NAME, s); + sprintf(s, "Mismatch; new message is '%s', expected '%s'.", (char *)pSharedMemory, md5ified_message); + say(MY_NAME, s); + done = 1; + } + } + } + } + + // Announce for one last time that the semaphore is free again so that + // Mrs. Conclusion can exit. + say(MY_NAME, "Final release of the semaphore followed by a 5 second pause"); + rc = release_semaphore(MY_NAME, pSemaphore, params.live_dangerously); + sleep(5); + // ...before beginning to wait until it is free again. + // Technically, this is bad practice. It's possible that on a + // heavily loaded machine, Mrs. Conclusion wouldn't get a chance + // to acquire the semaphore. There really ought to be a loop here + // that waits for some sort of goodbye message but for purposes of + // simplicity I'm skipping that. + + say(MY_NAME, "Final wait to acquire the semaphore"); + rc = acquire_semaphore(MY_NAME, pSemaphore, params.live_dangerously); + if (!rc) { + say(MY_NAME, "Destroying the shared memory."); + + // Un-mmap the memory... + rc = munmap(pSharedMemory, (size_t)params.size); + if (rc) { + sprintf(s, "Unmapping the memory failed; errno is %d", errno); + say(MY_NAME, s); + } + + // ...close the file descriptor... + if (-1 == close(fd)) { + sprintf(s, "Closing the memory's file descriptor failed; errno is %d", errno); + say(MY_NAME, s); + } + + // ...and destroy the shared memory. + rc = shm_unlink(params.shared_memory_name); + if (rc) { + sprintf(s, "Unlinking the memory failed; errno is %d", errno); + say(MY_NAME, s); + } + } + } + + say(MY_NAME, "Destroying the semaphore."); + // Clean up the semaphore + rc = sem_close(pSemaphore); + if (rc) { + sprintf(s, "Closing the semaphore failed; errno is %d", errno); + say(MY_NAME, s); + } + rc = sem_unlink(params.semaphore_name); + if (rc) { + sprintf(s, "Unlinking the semaphore failed; errno is %d", errno); + say(MY_NAME, s); + } + } + return 0; +} + + +void get_current_time(char *s) { + time_t the_time; + struct tm *the_localtime; + char *pAscTime; + + the_time = time(NULL); + the_localtime = localtime(&the_time); + pAscTime = asctime(the_localtime); + + strcpy(s, pAscTime); +} diff --git a/demo/premise.py b/demo/premise.py new file mode 100644 index 0000000..55711e0 --- /dev/null +++ b/demo/premise.py @@ -0,0 +1,118 @@ +# Python modules +import time +import mmap +import os +import sys +PY_MAJOR_VERSION = sys.version_info[0] +# hashlib is only available in Python >= 2.5. I still want to support +# older Pythons so I import md5 if hashlib is not available. 
Fortunately +# md5 can masquerade as hashlib for my purposes. +try: + import hashlib +except ImportError: + import md5 as hashlib + +# 3rd party modules +import posix_ipc + +# Utils for this demo +import utils + + +utils.say("Oooo 'ello, I'm Mrs. Premise!") + +params = utils.read_params() + +# Create the shared memory and the semaphore. +memory = posix_ipc.SharedMemory(params["SHARED_MEMORY_NAME"], posix_ipc.O_CREX, + size=params["SHM_SIZE"]) +semaphore = posix_ipc.Semaphore(params["SEMAPHORE_NAME"], posix_ipc.O_CREX) + +# MMap the shared memory +mapfile = mmap.mmap(memory.fd, memory.size) + +# Once I've mmapped the file descriptor, I can close it without +# interfering with the mmap. +memory.close_fd() + +# I seed the shared memory with a random string (the current time). +what_i_wrote = time.asctime() +utils.write_to_memory(mapfile, what_i_wrote) + +for i in range(params["ITERATIONS"]): + utils.say("iteration %d" % i) + if not params["LIVE_DANGEROUSLY"]: + # Release the semaphore... + utils.say("Releasing the semaphore") + semaphore.release() + # ...and wait for it to become available again. In real code + # I might want to sleep briefly before calling .acquire() in + # order to politely give other processes an opportunity to grab + # the semaphore while it is free so as to avoid starvation. But + # this code is meant to be a stress test that maximizes the + # opportunity for shared memory corruption and politeness is + # not helpful in stress tests. + utils.say("Waiting to acquire the semaphore") + semaphore.acquire() + + s = utils.read_from_memory(mapfile) + + # I keep checking the shared memory until something new has + # been written. + while s == what_i_wrote: + # Nothing new; give Mrs. Conclusion another chance to respond. + if not params["LIVE_DANGEROUSLY"]: + utils.say("Releasing the semaphore") + semaphore.release() + utils.say("Waiting to acquire the semaphore") + semaphore.acquire() + + s = utils.read_from_memory(mapfile) + + # What I read must be the md5 of what I wrote or something's + # gone wrong. + if PY_MAJOR_VERSION > 2: + what_i_wrote = what_i_wrote.encode() + + try: + assert(s == hashlib.md5(what_i_wrote).hexdigest()) + except AssertionError: + utils.raise_error(AssertionError, + "Shared memory corruption after %d iterations." % i) + + # MD5 the reply and write back to Mrs. Conclusion. + if PY_MAJOR_VERSION > 2: + s = s.encode() + what_i_wrote = hashlib.md5(s).hexdigest() + utils.write_to_memory(mapfile, what_i_wrote) + +utils.say("") +utils.say("%d iterations complete" % (i + 1)) + +# Announce for one last time that the semaphore is free again so that +# Mrs. Conclusion can exit. +if not params["LIVE_DANGEROUSLY"]: + utils.say("") + utils.say("Final release of the semaphore followed by a 5 second pause") + semaphore.release() + time.sleep(5) + # ...before beginning to wait until it is free again. + # Technically, this is bad practice. It's possible that on a + # heavily loaded machine, Mrs. Conclusion wouldn't get a chance + # to acquire the semaphore. There really ought to be a loop here + # that waits for some sort of goodbye message but for purposes of + # simplicity I'm skipping that. + utils.say("Final wait to acquire the semaphore") + semaphore.acquire() + +utils.say("Destroying semaphore and shared memory.") +mapfile.close() +# I could call memory.unlink() here but in order to demonstrate +# unlinking at the module level I'll do it that way. 
+posix_ipc.unlink_shared_memory(params["SHARED_MEMORY_NAME"]) + +semaphore.release() + +# I could also unlink the semaphore by calling +# posix_ipc.unlink_semaphore() but I'll do it this way instead. +semaphore.unlink() diff --git a/demo/utils.c b/demo/utils.c new file mode 100644 index 0000000..e9682a4 --- /dev/null +++ b/demo/utils.c @@ -0,0 +1,119 @@ +#include +#include +#include +#include +#include +#include +#include + +#include "utils.h" +#include "md5.h" + + +void md5ify(char *inString, char *outString) { + md5_state_t state; + md5_byte_t digest[16]; + int i; + + md5_init(&state); + md5_append(&state, (const md5_byte_t *)inString, strlen(inString)); + md5_finish(&state, digest); + + for (i = 0; i < 16; i++) + sprintf(&outString[i * 2], "%02x", digest[i]); +} + +void say(const char *pName, char *pMessage) { + time_t the_time; + struct tm *the_localtime; + char timestamp[256]; + + the_time = time(NULL); + + the_localtime = localtime(&the_time); + + strftime(timestamp, 255, "%H:%M:%S", the_localtime); + + printf("%s @ %s: %s\n", pName, timestamp, pMessage); + +} + + +int release_semaphore(const char *pName, sem_t *pSemaphore, int live_dangerously) { + int rc = 0; + char s[1024]; + + say(pName, "Releasing the semaphore."); + + if (!live_dangerously) { + rc = sem_post(pSemaphore); + if (rc) { + sprintf(s, "Releasing the semaphore failed; errno is %d\n", errno); + say(pName, s); + } + } + + return rc; +} + + +int acquire_semaphore(const char *pName, sem_t *pSemaphore, int live_dangerously) { + int rc = 0; + char s[1024]; + + say(pName, "Waiting to acquire the semaphore."); + + if (!live_dangerously) { + rc = sem_wait(pSemaphore); + if (rc) { + sprintf(s, "Acquiring the semaphore failed; errno is %d\n", errno); + say(pName, s); + } + } + + return rc; +} + + +void read_params(struct param_struct *params) { + char line[1024]; + char name[1024]; + char value[1024]; + + FILE *fp; + + fp = fopen("params.txt", "r"); + + while (fgets(line, 1024, fp)) { + if (strlen(line) && ('#' == line[0])) + ; // comment in input, ignore + else { + sscanf(line, "%[ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghjiklmnopqrstuvwxyz]=%s\n", name, value); + + // printf("name = %s, value = %d\n", name, value); + + if (!strcmp(name, "ITERATIONS")) + params->iterations = atoi(value); + if (!strcmp(name, "LIVE_DANGEROUSLY")) + params->live_dangerously = atoi(value); + if (!strcmp(name, "SEMAPHORE_NAME")) + strcpy(params->semaphore_name, value); + if (!strcmp(name, "SHARED_MEMORY_NAME")) + strcpy(params->shared_memory_name, value); + if (!strcmp(name, "PERMISSIONS")) + params->permissions = (int)strtol(value, NULL, 8); + if (!strcmp(name, "SHM_SIZE")) + params->size = atoi(value); + + name[0] = '\0'; + value[0] = '\0'; + } + } + + printf("iterations = %d\n", params->iterations); + printf("danger = %d\n", params->live_dangerously); + printf("semaphore name = %s\n", params->semaphore_name); + printf("shared memory name = %s\n", params->shared_memory_name); + printf("permissions = %o\n", params->permissions); + printf("size = %d\n", params->size); +} diff --git a/demo/utils.h b/demo/utils.h new file mode 100644 index 0000000..48d083b --- /dev/null +++ b/demo/utils.h @@ -0,0 +1,18 @@ +struct param_struct { + int iterations; + int live_dangerously; + char semaphore_name[512]; + char shared_memory_name[512]; + int permissions; + int size; +}; + + +void md5ify(char *, char *); +void say(const char *, char *); +int acquire_semaphore(const char *, sem_t *, int); +int release_semaphore(const char *, sem_t *, int); +void read_params(struct 
param_struct *); + + + diff --git a/demo/utils.py b/demo/utils.py new file mode 100644 index 0000000..03b1b3a --- /dev/null +++ b/demo/utils.py @@ -0,0 +1,90 @@ +import time +import sys + +PY_MAJOR_VERSION = sys.version_info[0] + +if PY_MAJOR_VERSION > 2: + NULL_CHAR = 0 +else: + NULL_CHAR = '\0' + +def raise_error(error, message): + # I have to exec() this code because the Python 2 syntax is invalid + # under Python 3 and vice-versa. + s = "raise " + s += "error, message" if (PY_MAJOR_VERSION == 2) else "error(message)" + + exec(s) + + +def say(s): + """Prints a timestamped, self-identified message""" + who = sys.argv[0] + if who.endswith(".py"): + who = who[:-3] + + s = "%s@%1.6f: %s" % (who, time.time(), s) + print (s) + + +def write_to_memory(mapfile, s): + """Writes the string s to the mapfile""" + say("writing %s " % s) + mapfile.seek(0) + # I append a trailing NULL in case I'm communicating with a C program. + s += '\0' + if PY_MAJOR_VERSION > 2: + s = s.encode() + mapfile.write(s) + + +def read_from_memory(mapfile): + """Reads a string from the mapfile and returns that string""" + mapfile.seek(0) + s = [ ] + c = mapfile.read_byte() + while c != NULL_CHAR: + s.append(c) + c = mapfile.read_byte() + + if PY_MAJOR_VERSION > 2: + s = [chr(c) for c in s] + s = ''.join(s) + + say("read %s" % s) + + return s + + +def read_params(): + """Reads the contents of params.txt and returns them as a dict""" + params = { } + + f = open("params.txt") + + for line in f: + line = line.strip() + if line: + if line.startswith('#'): + pass # comment in input, ignore + else: + name, value = line.split('=') + name = name.upper().strip() + + if name == "PERMISSIONS": + # Think octal, young man! + value = int(value, 8) + elif "NAME" in name: + # This is a string; leave it alone. + pass + else: + value = int(value) + + #print "name = %s, value = %d" % (name, value) + + params[name] = value + + f.close() + + return params + diff --git a/demo2/ReadMe.txt b/demo2/ReadMe.txt new file mode 100644 index 0000000..fda0ce2 --- /dev/null +++ b/demo2/ReadMe.txt @@ -0,0 +1,41 @@ +This demonstrates use of message queues via two applications named after +Mrs. Premise and Mrs. Conclusion of the Monty Python sketch. +http://www.youtube.com/watch?v=crIJvcWkVcs + +Like those two characters, these programs chat back and forth and the result +is a lot of nonsense. In this case, what the programs are saying isn't the +interesting part. What's interesting is how they're doing it. + +Mrs. Premise and Mrs. Conclusion (the programs, not the sketch characters) +communicate with POSIX message queues. + +Mrs. Premise starts things off by creating the queue and sending a random +string (the current time) to it. She then sits in a loop receiving whatever +message is on the queue. If it is the same message she wrote, she sends it +back to the queue. If it is a new message, it must be from Mrs. Conclusion. + +Meanwhile, Mrs. Conclusion is doing exactly the same thing, except that she +assumes Mrs. Premise will write the first message. + +When either of these programs receives a new message, they send back an +md5 hash of that message. This serves two purposes. First, it ensures that +subsequent messages are very different so that if a message somehow gets +corrupted (say by being partially overwritten by the next message), it will +not escape notice. Second, it ensures that corruption can be detected if +it happens, because Mrs. Premise and Mrs. Conclusion can calculate what the +other's response to their message should be. 
+ +Since message queues manage all of the concurrency issues transparently, +Mrs. Premise and Mrs. Conclusion won't ever find their messages corrupted +no matter how many messages they exchange. You can experiment with this by +setting ITERATIONS in params.txt to a very large value. + +These programs are not meant as a demostration on how to make best use of a +message queue. In fact, they're very badly behaved because they poll the +queue as fast as possible -- they'll send your CPU usage right up to 100%. +Remember, they're trying as hard as they can to step one another so as to +expose any concurrency problems that might be present. + +Real code would want to sleep (or do something useful) in between calling +send() and receive(). + diff --git a/demo2/SampleIpcConversation.png b/demo2/SampleIpcConversation.png new file mode 100644 index 0000000..9d11e7a Binary files /dev/null and b/demo2/SampleIpcConversation.png differ diff --git a/demo2/cleanup.py b/demo2/cleanup.py new file mode 100755 index 0000000..04e21e5 --- /dev/null +++ b/demo2/cleanup.py @@ -0,0 +1,15 @@ +import posix_ipc +import utils + +params = utils.read_params() + +try: + posix_ipc.unlink_message_queue(params["MESSAGE_QUEUE_NAME"]) + s = "message queue %s removed" % params["MESSAGE_QUEUE_NAME"] + print (s) +except: + print ("queue doesn't need cleanup") + + + +print ("\nAll clean!") diff --git a/demo2/conclusion.py b/demo2/conclusion.py new file mode 100644 index 0000000..d857663 --- /dev/null +++ b/demo2/conclusion.py @@ -0,0 +1,65 @@ +# Python modules +import time +import sys +PY_MAJOR_VERSION = sys.version_info[0] +# hashlib is only available in Python >= 2.5. I still want to support +# older Pythons so I import md5 if hashlib is not available. Fortunately +# md5 can masquerade as hashlib for my purposes. +try: + import hashlib +except ImportError: + import md5 as hashlib + +# 3rd party modules +import posix_ipc + +# Utils for this demo +import utils + + +utils.say("Oooo 'ello, I'm Mrs. Conclusion!") + +params = utils.read_params() + +# Mrs. Premise has already created the message queue. I just need a handle +# to it. +mq = posix_ipc.MessageQueue(params["MESSAGE_QUEUE_NAME"]) + +what_i_sent = "" + +for i in range(0, params["ITERATIONS"]): + utils.say("iteration %d" % i) + + s, _ = mq.receive() + s = s.decode() + utils.say("Received %s" % s) + + while s == what_i_sent: + # Nothing new; give Mrs. Premise another chance to respond. + mq.send(s) + + s, _ = mq.receive() + s = s.decode() + utils.say("Received %s" % s) + + if what_i_sent: + if PY_MAJOR_VERSION > 2: + what_i_sent = what_i_sent.encode() + try: + assert(s == hashlib.md5(what_i_sent).hexdigest()) + except AssertionError: + utils.raise_error(AssertionError, + "Message corruption after %d iterations." % i) + #else: + # When what_i_sent is blank, this is the first message which + # I always accept without question. + + # MD5 the reply and write back to Mrs. Premise. + s = hashlib.md5(s.encode()).hexdigest() + utils.say("Sending %s" % s) + mq.send(s) + what_i_sent = s + + +utils.say("") +utils.say("%d iterations complete" % (i + 1)) diff --git a/demo2/params.txt b/demo2/params.txt new file mode 100644 index 0000000..e8fa277 --- /dev/null +++ b/demo2/params.txt @@ -0,0 +1,4 @@ +# These parameters control how Mrs. Premise and Mrs. Conclusion behave. 
+ +ITERATIONS=1000 +MESSAGE_QUEUE_NAME=/my_message_queue diff --git a/demo2/premise.py b/demo2/premise.py new file mode 100644 index 0000000..9de9a09 --- /dev/null +++ b/demo2/premise.py @@ -0,0 +1,75 @@ +# Python modules +import time +import sys +PY_MAJOR_VERSION = sys.version_info[0] +# hashlib is only available in Python >= 2.5. I still want to support +# older Pythons so I import md5 if hashlib is not available. Fortunately +# md5 can masquerade as hashlib for my purposes. +try: + import hashlib +except ImportError: + import md5 as hashlib + +# 3rd party modules +import posix_ipc + +# Utils for this demo +import utils + + +utils.say("Oooo 'ello, I'm Mrs. Premise!") + +params = utils.read_params() + +# Create the message queue. +mq = posix_ipc.MessageQueue(params["MESSAGE_QUEUE_NAME"], posix_ipc.O_CREX) + +# The first message is a random string (the current time). +s = time.asctime() +utils.say("Sending %s" % s) +mq.send(s) +what_i_sent = s + +for i in range(0, params["ITERATIONS"]): + utils.say("iteration %d" % i) + + s, _ = mq.receive() + s = s.decode() + utils.say("Received %s" % s) + + # If the message is what I wrote, put it back on the queue. + while s == what_i_sent: + # Nothing new; give Mrs. Conclusion another chance to respond. + mq.send(s) + + s, _ = mq.receive() + s = s.decode() + utils.say("Received %s" % s) + + # What I read must be the md5 of what I wrote or something's + # gone wrong. + if PY_MAJOR_VERSION > 2: + what_i_sent = what_i_sent.encode() + + try: + assert(s == hashlib.md5(what_i_sent).hexdigest()) + except AssertionError: + utils.raise_error(AssertionError, + "Message corruption after %d iterations." % i) + + + # MD5 the reply and write back to Mrs. Conclusion. + s = hashlib.md5(s.encode()).hexdigest() + utils.say("Sending %s" % s) + mq.send(s) + what_i_sent = s + +utils.say("") +utils.say("%d iterations complete" % (i + 1)) + +utils.say("Destroying the message queue.") +mq.close() +# I could call simply mq.unlink() here but in order to demonstrate +# unlinking at the module level I'll do it that way. +posix_ipc.unlink_message_queue(params["MESSAGE_QUEUE_NAME"]) + diff --git a/demo2/utils.py b/demo2/utils.py new file mode 100644 index 0000000..31e2b05 --- /dev/null +++ b/demo2/utils.py @@ -0,0 +1,42 @@ +import time +import sys + +def say(s): + who = sys.argv[0] + if who.endswith(".py"): + who = who[:-3] + + s = "%s@%1.6f: %s" % (who, time.time(), s) + print (s) + + +def read_params(): + params = { } + + f = open("params.txt") + + for line in f: + line = line.strip() + if len(line): + if line.startswith('#'): + pass # comment in input, ignore + else: + name, value = line.split('=') + name = name.upper().strip() + + if name == "PERMISSIONS": + value = int(value, 8) + elif "NAME" in name: + # This is a string; leave it alone. + pass + else: + value = int(value) + + #print "name = %s, value = %d" % (name, value) + + params[name] = value + + f.close() + + return params + diff --git a/demo3/ReadMe.txt b/demo3/ReadMe.txt new file mode 100644 index 0000000..16f1fc5 --- /dev/null +++ b/demo3/ReadMe.txt @@ -0,0 +1,12 @@ +These scripts demonstrate four message queue notification techniques. + +All of demos ask you to enter a message. That message is then sent to the +queue and received in a notification handler and printed to stdout. + +- one_shot_signal.py and one_shot_thread.py receive their notifications via a +signal and thread, respectively. After one message & notification, the demo +exits. 
+ +- repeating_signal.py and repeating_thread.py are similar, except that they +re-register for notifications in their notification handler so you can +enter as many messages as you like. diff --git a/demo3/cleanup.py b/demo3/cleanup.py new file mode 100755 index 0000000..53ada42 --- /dev/null +++ b/demo3/cleanup.py @@ -0,0 +1,13 @@ +import posix_ipc +import utils + +try: + posix_ipc.unlink_message_queue(utils.QUEUE_NAME) + s = "message queue %s removed" % utils.QUEUE_NAME + print (s) +except: + print ("queue doesn't need cleanup") + + + +print ("\nAll clean!") diff --git a/demo3/one_shot_signal.py b/demo3/one_shot_signal.py new file mode 100644 index 0000000..23efe08 --- /dev/null +++ b/demo3/one_shot_signal.py @@ -0,0 +1,45 @@ +# Python modules +import time +import signal + +# 3rd party modules +import posix_ipc + +# Utils for this demo +import utils + + +MY_SIGNAL = signal.SIGUSR1 + + +def handle_signal(signal_number, stack_frame): + message, priority = mq.receive() + + print ("Ding! Message with priority %d received: %s" % (priority, message)) + + + +# Create the message queue. +mq = posix_ipc.MessageQueue(utils.QUEUE_NAME, posix_ipc.O_CREX) + +# Request notifications +mq.request_notification(MY_SIGNAL) + +# Register my signal handler +signal.signal(MY_SIGNAL, handle_signal) + +# Get user input and send it to the queue. +print ("Enter a message:") +mq.send(utils.get_input()) + +# The signal fires almost instantly, but if I don't pause at least +# briefly then the main thread may exit before the notification fires. +print ("Sleeping for one second to allow the notification to happen.") +time.sleep(1) + +print ("Destroying the message queue.") +mq.close() +# I could call simply mq.unlink() here but in order to demonstrate +# unlinking at the module level I'll do it that way. +posix_ipc.unlink_message_queue(utils.QUEUE_NAME) + diff --git a/demo3/one_shot_thread.py b/demo3/one_shot_thread.py new file mode 100644 index 0000000..c669370 --- /dev/null +++ b/demo3/one_shot_thread.py @@ -0,0 +1,38 @@ +# Python modules +import time + +# 3rd party modules +import posix_ipc + +# Utils for this demo +import utils + + +def process_notification(mq): + message, priority = mq.receive() + + print ("Ding! Message with priority %d received: %s" % (priority, message)) + + + +# Create the message queue. +mq = posix_ipc.MessageQueue(utils.QUEUE_NAME, posix_ipc.O_CREX) + +# Request notifications +mq.request_notification( (process_notification, mq) ) + +# Get user input and send it to the queue. +print ("Enter a message:") +mq.send(utils.get_input()) + +# The callback happens almost instantly, but if I don't pause at least +# briefly then the main thread may exit before the notification fires. +print ("Sleeping for one second to allow the notification to happen.") +time.sleep(1) + +print ("Destroying the message queue.") +mq.close() +# I could call simply mq.unlink() here but in order to demonstrate +# unlinking at the module level I'll do it that way. +posix_ipc.unlink_message_queue(utils.QUEUE_NAME) + diff --git a/demo3/repeating_signal.py b/demo3/repeating_signal.py new file mode 100644 index 0000000..2fed73f --- /dev/null +++ b/demo3/repeating_signal.py @@ -0,0 +1,46 @@ +# Python modules +import time +import signal + +# 3rd party modules +import posix_ipc + +# Utils for this demo +import utils + + +MY_SIGNAL = signal.SIGUSR1 + + +def handle_signal(signal_number, stack_frame): + message, priority = mq.receive() + + print ("Ding! 
Message with priority %d received: %s" % (priority, message)) + + # Re-register for notifications + mq.request_notification(MY_SIGNAL) + + +# Create the message queue. +mq = posix_ipc.MessageQueue(utils.QUEUE_NAME, posix_ipc.O_CREX) + +# Request notifications +mq.request_notification(MY_SIGNAL) + +# Register my signal handler +signal.signal(MY_SIGNAL, handle_signal) + +# Get user input and send it to the queue. +msg = "42" +while msg: + print ("\nEnter a message. A blank message will end the demo:") + msg = utils.get_input() + if msg: + mq.send(msg) + +print ("Destroying the message queue.") +mq.close() +# I could call simply mq.unlink() here but in order to demonstrate +# unlinking at the module level I'll do it that way. +posix_ipc.unlink_message_queue(utils.QUEUE_NAME) + diff --git a/demo3/repeating_thread.py b/demo3/repeating_thread.py new file mode 100644 index 0000000..e456428 --- /dev/null +++ b/demo3/repeating_thread.py @@ -0,0 +1,40 @@ +# Python modules +import time + +# 3rd party modules +import posix_ipc + +# Utils for this demo +import utils + + +def process_notification(mq): + message, priority = mq.receive() + + print ("Ding! Message with priority %d received: %s" % (priority, message)) + + # Re-register for notifications + mq.request_notification( (process_notification, mq) ) + + + +# Create the message queue. +mq = posix_ipc.MessageQueue(utils.QUEUE_NAME, posix_ipc.O_CREX) + +# Request notifications +mq.request_notification( (process_notification, mq) ) + +# Get user input and send it to the queue. +s = "42" +while s: + print ("\nEnter a message. A blank message will end the demo:") + s = utils.get_input() + if s: + mq.send(s) + +print ("Destroying the message queue.") +mq.close() +# I could call simply mq.unlink() here but in order to demonstrate +# unlinking at the module level I'll do it that way. +posix_ipc.unlink_message_queue(utils.QUEUE_NAME) + diff --git a/demo3/utils.py b/demo3/utils.py new file mode 100644 index 0000000..67e066f --- /dev/null +++ b/demo3/utils.py @@ -0,0 +1,15 @@ +# Python modules +import sys + + +QUEUE_NAME = "/my_message_queue" + + +PY_MAJOR_VERSION = sys.version_info[0] + +def get_input(): + """Get input from user, Python 2.x and 3.x compatible""" + if PY_MAJOR_VERSION > 2: + return input() + else: + return raw_input() diff --git a/demo4/ReadMe.txt b/demo4/ReadMe.txt new file mode 100644 index 0000000..25f73e9 --- /dev/null +++ b/demo4/ReadMe.txt @@ -0,0 +1,15 @@ +This demonstrates the use of a semaphore with a Python context manager (a +'with' statement). + +To run the demo, simply run `python parent.py`. It launches child.py. + +The programs parent.py and child.py share a semaphore; the latter acquires the +semaphore via a context manager. The child process deliberately kills itself via +an error about half the time (randomly) in order to demonstrate that the +context manager releases the semaphore regardless of whether or not the context +block is exited gracefully. + +Once the child releases the semaphore, the parent destroys it. + +The whole thing happens in less than 10 seconds. + diff --git a/demo4/child.py b/demo4/child.py new file mode 100644 index 0000000..cbceb37 --- /dev/null +++ b/demo4/child.py @@ -0,0 +1,21 @@ +import posix_ipc +import time +import sys +import random + +# The parent passes the semaphore's name to me. 
+name = sys.argv[1] + +print('Child: waiting to aquire semaphore ' + name) + +with posix_ipc.Semaphore(name) as sem: + print('Child: semaphore ' + sem.name + ' aquired; holding for 3 seconds.') + + # Flip a coin to determine whether or not to bail out of the context. + if random.randint(0, 1): + print("Child: raising ValueError to demonstrate unplanned context exit") + raise ValueError + + time.sleep(3) + + print('Child: gracefully exiting context (releasing the semaphore).') diff --git a/demo4/parent.py b/demo4/parent.py new file mode 100644 index 0000000..799a6e7 --- /dev/null +++ b/demo4/parent.py @@ -0,0 +1,46 @@ +import subprocess +import posix_ipc +import time +import os + +sem = posix_ipc.Semaphore(None, posix_ipc.O_CREX, initial_value = 1) +print("Parent: created semaphore {}.".format(sem.name)) + +sem.acquire() + +# Spawn a child that will wait on this semaphore. +path, _ = os.path.split(__file__) +print("Parent: spawning child process...") +subprocess.Popen(["python", os.path.join(path, 'child.py'), sem.name]) + +for i in range(3, 0, -1): + print("Parent: child process will acquire the semaphore in {} seconds...".format(i)) + time.sleep(1) + +sem.release() + +# Sleep for a second to give the child a chance to acquire the semaphore. +# This technique is a little sloppy because technically the child could still +# starve, but it's certainly sufficient for this demo. +time.sleep(1) + +# Wait for the child to release the semaphore. +print("Parent: waiting for the child to release the semaphore.") +sem.acquire() + +# Clean up. +print("Parent: destroying the semaphore.") +sem.release() +sem.unlink() + +msg = """ +By the time you're done reading this, the parent will have exited and so the +operating system will have destroyed the semaphore. You can prove that the +semaphore is gone by running this command and observing that it raises +posix_ipc.ExistentialError -- + + python -c "import posix_ipc; posix_ipc.Semaphore('{}')" + +""".format(sem.name) + +print(msg) diff --git a/history.html b/history.html new file mode 100644 index 0000000..e021ec4 --- /dev/null +++ b/history.html @@ -0,0 +1,501 @@ + + + + + + + + + + The posix_ipc Module for POSIX IPC Under Python -- Version History + + + + + + +

Version History

+ +

This is the version history for the +posix_ipc +module.

+ +
    +
  • Current – 0.9.8 (20 Feb 2014) – +

    As with 0.9.7, there are no code or feature changes in this version. + This version merely corrects a documentation error. +

    + +

    This version comes with a big wish for peace in Ukraine. Мир!

    +
  • + +
  • 0.9.7 (20 Feb 2014) – +

    There are no code or feature changes in this version. The bump in + version number reflects that this is the first version + also available on PyPI. +

    + +

    This version comes with a big wish for peace in Ukraine. Мир!

    +
  • + +
  • 0.9.6 (23 Oct 2013) – + +

    Fixed two BSD-specific bugs introduced in version 0.9.5 + that occurred if the kernel module mqueuefs wasn't + loaded at install time. Specifically -- +

    + +
      +
    • The installer + would print a harmless but distracting error message from sysctl. + (This also affected OS X which is FreeBSD-ish.) +
    • + +
    • posix_ipc would build with an inappropriate + value for QUEUE_MESSAGES_MAX_DEFAULT. + Subsequent attempts to create a message queue would fail unless the + caller set the max_messages param to an appropriate + value. (This didn't affect OS X since OS X doesn't support message + queues at all.) +
    • +
    + +

    Also, rewrote the message queue thread notification code to address + the old bug (Fatal Python error: PyEval_AcquireLock: current thread state is NULL) + that appeared during release testing for 0.9.5 and which + has plagued me on and off since I wrote this code. The new code uses + the + algorithm recommended in the Python documentation which may have + been flaky when I started using it in Python 2.4. It seems stable now + under Python 2.6+/3. +

    +
  • + + +
  • 0.9.5 (14 Oct 2013) – +
      +
    • Added the ability to use Semaphores in context managers. Thanks to Matt Ruffalo for the suggestion and patch. (A short usage sketch follows this entry.)
    • +
    • Fixed a bug under FreeBSD 9.x where I used overly ambitious values for some message queue constants at build time. Now, posix_ipc asks sysctl for the correct values. Köszönöm to Attila Nagy for the bug report.
    • +
    +
  • + + +
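    The demo4 directory in this distribution shows the context manager feature in full; the few lines below are only a minimal sketch. The semaphore name "/demo_sem" is made up for illustration, and any unused name would do.

        import posix_ipc

        sem = posix_ipc.Semaphore("/demo_sem", posix_ipc.O_CREX, initial_value=1)
        with sem:        # entering the block acquires the semaphore
            pass         # do some work while holding it
        # leaving the block releases it, even if the block raised
        sem.unlink()     # discard the name once it's no longer needed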
  • 0.9.4 (2 Sept 2012) – +

    Fixed a buglet. When creating shared memory under Linux and + specifying both a size and the read-only flag, creating the memory + would succeed but calling ftruncate() would fail. + The failure to change the size was correctly reported + but posix_ipc failed to clean up the shared memory segment + it had created. That's now fixed. Thanks to Kevin Miles for the bug + report. +

    +
  • + + +
  • 0.9.3 (2 Jan 2012) – +

    Added a bugfix/feature to raise an error (rather than segfault) when trying to use a closed semaphore. Thanks to Russel for the suggestion and patch.

    +
  • + + +
  • 0.9.2 (6 Nov 2011) – +
      +
    • Fixed a bug where timeouts in Semaphore.acquire(), MessageQueue.send() and MessageQueue.receive() were only accurate to about one second due to use of the C call time(). Switching to gettimeofday() fixes the problem. Thanks to Douglas Young for the bug report and patch. (A short timeout sketch follows this entry.)
    • + +
    • Fixed a bug in prober.py that caused install to fail + under Ubuntu 11.10. prober.py specified link options + in the wrong order, and so linking one of the test + applications that's built during setup was failing. Thanks + to Kevin Miles for the bug report. +
    • + +
    • Added a check in prober.py to see if + sysconf_names exists in the os module. It + doesn't exist under Cygwin, and this code caused an error + on that platform. Thanks to Rizwan Raza for the bug report. +
    • +
    +
  • + +
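    To make the sub-second timeout fix concrete, here is a minimal sketch. The queue name is invented for illustration, and it assumes posix_ipc.BusyError is the exception raised when a timed call gives up.

        import posix_ipc

        mq = posix_ipc.MessageQueue("/demo_queue", posix_ipc.O_CREX)
        try:
            # Nothing has been sent, so this should give up after roughly a
            # quarter of a second instead of rounding up to a full second.
            mq.receive(0.25)
        except posix_ipc.BusyError:
            print("receive() timed out")
        finally:
            mq.close()
            mq.unlink()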
  • 0.9.1 (7 Apr 2011) – +
      +
    • Fixed (?) a bug in message queue thread notification that caused + ceval: tstate mix-up and other fun messages. Thanks to + Lev Maximov for the bug report. +
    • + +
    • Added the demo3 directory with demos of message queue notification techniques. This was supposed to be included in version 0.9.0 but I accidentally left it out. (Also reported by Lev.)
    • +
    +
  • + +
  • 0.9.0 (31 Dec 2010) – +

    Added the demo3 directory with demos of message queue + notification techniques. Also, fixed two bugs related to message + queue notification. Big thanks to + Philip D. Bober for debugging and providing a patch to the + most difficult part of the code. The bugs were – +

    + +
      +
    • First, the series of calls to set up the Python thread in + process_notification() were simply wrong. They worked + some (most?) of the time but would segfault eventually because + I was creating a Python thread state when I should not have. +
    • + +
    • Second, the code in process_notification() failed + to consider that the user's callback might re-request + notification, thus overwriting pointers that I would later + decref. process_notification() is now thread-safe. +
    • +
    +
  • + + +
  • 0.8.1 (15 Mar 2010) – +

    Fixed a sloppy declaration that caused a compile error under + Cygwin 1.7.1. Thanks to Jill McCutcheon for the bug report. +

    +
  • + + +
  • 0.8.0 (2 Mar 2010) – +
      +
    • Fixed message queue support detection in FreeBSD and + the platform-specific documentation about FreeBSD. +
    • + +
    • Rearranged the documentation and split the history + (which you're reading now) into a separate file. +
    • + +
    • I fixed two small bugs related to the confusing message queue constants. The bugs and associated changes are explained below. The explanation is really long not because the changes were big (they weren't), but because they and the rationale behind them are subtle.

      Fixing these bugs was made easier by this realization: + on all of the systems to which I have access that implement + message queues (FreeBSD, OpenSolaris, Linux, and Windows + + Cygwin), all except Linux implement them as + memory-mapped files or something similar. On these + non-Linux systems, the + maximum queue message count and size are pretty darn big + (LONG_MAX). Therefore, only on Linux is anyone likely to + encounter limits to message queue size and content. +

      + +

      The first bug I fixed was related to four message queue + constants mentioned in posix_ipc documentation: + QUEUE_MESSAGES_MAX, + QUEUE_MESSAGES_MAX_DEFAULT, + QUEUE_MESSAGE_SIZE_MAX and + QUEUE_MESSAGE_SIZE_MAX_DEFAULT. All four were defined + in the C + code, but the two XXX_DEFAULT constants weren't exposed on + the Python side. +

      + +

      The second bug was that under Linux, QUEUE_MESSAGES_MAX and + QUEUE_MESSAGE_SIZE_MAX were permanently fixed to their + values at posix_ipc's compile/install time even if the + relevant system values changed later. Thanks to Kyle Tippetts + for bringing this to my attention. +

      + +

      QUEUE_MESSAGES_MAX_DEFAULT was arbitrarily limited to + (at most) 1024. This wasn't a bug, just a bad choice. +

      + +

      I made a few changes in order to fix these problems –

      + +
        +
      1. The constants QUEUE_MESSAGES_MAX and QUEUE_MESSAGE_SIZE_MAX have been deleted since they were only sure to be accurate on systems where they were irrelevant. Furthermore, Linux (the only place where they matter) exposes these values through the file system (in /proc/sys/fs/mqueue/msg_max and /proc/sys/fs/mqueue/msgsize_max respectively) so Python apps that need them can read them without any help from posix_ipc. (A short sketch of reading those files follows this entry.)
      2. + +
      3. QUEUE_MESSAGES_MAX_DEFAULT and + QUEUE_MESSAGE_SIZE_MAX_DEFAULT are now exposed to + Python as they should have been all along. + QUEUE_MESSAGES_MAX_DEFAULT is now set to + LONG_MAX on all platforms except Linux, where + it's set at compile time from /proc/sys/fs/mqueue/msg_max. +
      4. +
      5. QUEUE_MESSAGE_SIZE_MAX_DEFAULT remains at the fairly + arbitrary value of 8k. It's not a good idea to make it too big + since a buffer of this size is allocated every time + MessageQueue.receive() is called. Under Linux, I + check the contents of /proc/sys/fs/mqueue/msgsize_max + and make QUEUE_MESSAGE_SIZE_MAX_DEFAULT smaller if + necessary. +
      6. +
      +
    • +
    +
  • + +
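    Since the deleted constants only matter on Linux, a program that needs the kernel's limits can read them straight from /proc, as item 1 above suggests. A minimal, Linux-only sketch:

        def read_mqueue_limit(name):
            # name is e.g. "msg_max" or "msgsize_max"
            with open("/proc/sys/fs/mqueue/" + name) as f:
                return int(f.read())

        max_messages = read_mqueue_limit("msg_max")
        max_message_size = read_mqueue_limit("msgsize_max")
        print(max_messages, max_message_size)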
  • 0.7.0 (21 Feb 2010) – +

    Added Python 3.1 support.

    +
  • + +
  • 0.6.3 (15 Feb 2009) – +
      +
    • Fixed a bug where creating an IPC object with invalid + parameters would correctly raise a ValueError, but + with a message that may or may not have correctly identified + the cause. (My code was making an educated guess that was + sometimes wrong.) + +

      As of this version, if initialization of an IPC object + fails with the error code EINVAL, + posix_ipc raises a ValueError + with the vague-but-correct message "Invalid parameter(s)". +

      +
    • +
    • Cleaned up the code a little internally.
    • +
    +
  • + +
  • 0.6.2 (30 Dec 2009) – +

    Fixed a bug where a MessageQueue's mode + attribute returned garbage. Grazie to Stefano Debenedetti for + the bug report. +

    +
  • + +
  • 0.6.1 (29 Nov 2009) – +

    There were no functional changes to the module in this version, but I added the convenience function close_fd() and fixed some documentation and demo bugs/sloppiness.

    +
      +
    • Added the convenience function SharedMemory.close_fd(). + Thanks to Kyle Tippetts for pointing out the usefulness + of this. +
    • +
    • Added the module attributes __version__, + __copyright__, __author__ and + __license__. +
    • +
    • Fixed the license info embedded in posix_ipc_module.c + which was still referring to GPL. +
    • Replaced file() in setup.py with + open()/close().
    • + +
    • Demo changes – +
        +
      • Made the demo a bit faster, especially for large + shared memory chunks. Thanks to Andrew Trevorrow + for the suggestion and patch. +
      • +
      • Fixed a bug in premise.c; it wasn't closing the semaphore.
      • +
      • Fixed a bug in premise.py; it wasn't closing the + shared memory's file descriptor. +
      • +
      • Fixed bugs in conclusion.py; it wasn't closing the + shared memory's file descriptor, the semaphore or + the mapfile. +
      • +
      +
    • +
    +
  • + +
  • 0.6 (5 Oct 2009) – +
      +
    • Relicensed from the GPL to a BSD license to celebrate the + one year anniversary of this module. +
    • Updated Cygwin info.
    • +
    +
  • + +
  • 0.5.5 (17 Sept 2009) – +
      +
    • Set MQ_MAX_MESSAGES and MQ_MAX_MESSAGE_SIZE to + LONG_MAX under cygwin. + (Danke to René Liebscher.) +
    • +
    • Surrounded the #define PAGE_SIZE in probe_results.h with + #ifndef/#endif because it is already defined on some systems. + (Danke to René Liebscher, again.) +
    • +
    • Minor documentation changes.
    • +
    +
  • + +
  • 0.5.4 (21 Jun 2009) – + +
  • + +
  • 0.5.3 (8 Mar 2009) – +
      +
    • Added automatic generation of names.
    • +
    • Changed status to beta.
    • +
    +
  • + +
  • 0.5.2 (12 Feb 2009) – +
      +
    • Fixed a memory leak in MessageQueue.receive().
    • +
    • Fixed a bug where the name of the MessageQueue + current_messages attribute didn't match the name + given in the documentation. +
    • +
    • Added the VERSION attribute to the module.
    • +
    • Fixed a documentation bug that said message queue + notifications were not yet supported. +
    • +
    +
  • + +
  • 0.5.1 (8 Feb 2009) – +
      +
    • Fixed outdated info in setup.py that was showing up + in the Python package index. Updated README while I + was at it. +
    • +
    +
  • + +
  • 0.5 (8 Feb 2009) – +
      +
    • Added the message queue notification feature.
    • +
    • Added a mode attribute to each type.
    • +
    • Added str() and repr() support to + each object. +
    • +
    • Added a demo for message queues.
    • +
    • Fixed some minor documentation problems and added + some information (esp. about Windows + Cygwin). +
    • +
    +
  • + +
  • 0.4 (9 Jan 2009) – +
      +
    • Added message queue support.
    • +
    • Fixed the poor choices I'd made for names for classes and + errors by removing the leading "Posix" and "PosixIpc". +
    • +
    • Simplified the prober and expanded it (for message + queue support). +
    • +
    • Cleaned up this documentation.
    • +
    +
  • + +
  • 0.3.2 (4 Jan 2009) – +
      +
    • Fixed an uninitialized value passed to PyMem_Free() when + invalid params were passed to either constructor. +
    • +
    +
  • + +
  • 0.3.1 (1 Jan 2009) – +
      +
    • Fixed a big bug where the custom exceptions defined by this + module weren't visible. +
    • +
    • Fixed a compile complaint about the redefinition of + SEM_VALUE_MAX on Linux (Ubuntu) that I introduced + in the previous version. +
    • +
    • Fixed a bug in the demo program premise.c where I wasn't + closing the file descriptor associated with the shared + memory. +
    • +
    • Added the PAGE_SIZE attribute. This was already + available in the mmap module that you need to use shared + memory anyway, but adding it makes the interface more + consistent with the sysv_ipc module. +
    • +
    +
  • + +
  • 0.3 (19 Dec 2008) – +
      +
    • Added informative custom errors instead of raising + OSError when something goes wrong. +
    • + +
    • Made the code friendly to multi-threaded applications. +
    • + +
    • Added the constants O_CREX and + SEMAPHORE_VALUE_MAX. +
    • + +
    • Added code to prohibit negative timeout values.
    • +
    +
  • +
  • 0.2 (4 Dec 2008) – +
      +
    • Removed the un-Pythonic try_acquire() method. The same functionality is now available by passing a timeout of 0 to the .acquire() method. (A short sketch follows this entry.)
    • + +
    • Renamed the module constant ACQUIRE_TIMEOUT_SUPPORTED to + SEMAPHORE_TIMEOUT_SUPPORTED. +
    • + +
    • Moved the demo code into its own directory and added C + versions of the Python scripts. The parameters are now in a + text file shared by the Python and C program, so you can + run the C version of Mrs. Premise and have it communicate with + the Python version of Mrs. Conclusion and vice versa. +
    • +
    +
  • +
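    A minimal sketch of the non-blocking idiom that replaced try_acquire(). The semaphore name is made up, and it assumes posix_ipc.BusyError is the error raised when the semaphore can't be acquired immediately.

        import posix_ipc

        sem = posix_ipc.Semaphore("/demo_sem", posix_ipc.O_CREX, initial_value=0)
        try:
            sem.acquire(0)   # what try_acquire() used to do
        except posix_ipc.BusyError:
            print("The semaphore is busy.")
        sem.unlink()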
  • 0.1 (9 Oct 2008) – Original (alpha) version.
  • +
+ + + diff --git a/posix_ipc_module.c b/posix_ipc_module.c new file mode 100644 index 0000000..7adffc0 --- /dev/null +++ b/posix_ipc_module.c @@ -0,0 +1,2612 @@ +/* +posix_ipc - A Python module for accessing POSIX 1003.1b-1993 semaphores, + shared memory and message queues. + +Copyright (c) 2012, Philip Semanchuk +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of posix_ipc nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY ITS CONTRIBUTORS ''AS IS'' AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL Philip Semanchuk BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#define PY_SSIZE_T_CLEAN + +#include +#include "structmember.h" + +#include +#include +#include +#include +#include + +// Just for the math surrounding timeouts for sem_timedwait() +#include + +// For mq_notify +#include +#include + +#include "probe_results.h" + +// For semaphore stuff +#include + +// For shared memory stuff +#include +#include + +#ifdef MESSAGE_QUEUE_SUPPORT_EXISTS +// For msg queues +#include +#endif + +// define Py_TYPE for versions before Python 2.6 +#ifndef Py_TYPE +#define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) +#endif + +// define PyVarObject_HEAD_INIT for versions before Python 2.6 +#ifndef PyVarObject_HEAD_INIT +#define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size, +#endif + +/* SEM_FAILED is defined as an int in Apple's headers, and this makes the +compiler complain when I compare it to a pointer. Python faced the same +problem (issue 9586) and I copied their solution here. +ref: http://bugs.python.org/issue9586 + +Note that in /Developer/SDKs/MacOSX10.4u.sdk/usr/include/sys/semaphore.h, +SEM_FAILED is #defined as -1 and that's apparently the definition used by +Python when building. In /usr/include/sys/semaphore.h, it's defined +as ((sem_t *)-1). +*/ +#ifdef __APPLE__ + #undef SEM_FAILED + #define SEM_FAILED ((sem_t *)-1) +#endif + +/* POSIX says that a mode_t "shall be an integer type". To avoid the need +for a specific get_mode function for each type, I'll just stuff the mode into +a long and mention it in the Xxx_members list for each type. 
+ref: http://www.opengroup.org/onlinepubs/000095399/basedefs/sys/types.h.html +*/ + +typedef struct { + PyObject_HEAD + char *name; + long mode; + sem_t *pSemaphore; +} Semaphore; + + +typedef struct { + PyObject_HEAD + char *name; + long mode; + int fd; +} SharedMemory; + + +#ifdef MESSAGE_QUEUE_SUPPORT_EXISTS +typedef struct { + PyObject_HEAD + char *name; + mqd_t mqd; + long mode; + long max_messages; + long max_message_size; + int send_permitted; + int receive_permitted; + PyObject *notification_callback; + PyObject *notification_callback_param; + // In the event that the user requests notifications in a new thread, + // I'll need a reference to the interpreter in order to create the + // thread for the callback. See request_notification() and + // process_notification() for details. + PyInterpreterState *interpreter; +} MessageQueue; +#endif + +// FreeBSD (and perhaps other BSDs) limit names to 14 characters. In the +// code below, strings of this length are allocated on the stack, so +// increase this gently or change that code to use malloc(). +#define MAX_SAFE_NAME_LENGTH 14 + + +/* Struct to contain a timeout which can be None */ +typedef struct { + int is_none; + int is_zero; + struct timespec timestamp; +} NoneableTimeout; + + +/* Struct to contain an IPC object name which can be None */ +typedef struct { + int is_none; + char *name; +} NoneableName; + + +/* + Exceptions for this module +*/ + +static PyObject *pBaseException; +static PyObject *pPermissionsException; +static PyObject *pSignalException; +static PyObject *pExistentialException; +static PyObject *pBusyException; + + +#define ONE_BILLION 1000000000 + +#ifdef POSIX_IPC_DEBUG +#define DPRINTF(fmt, args...) fprintf(stderr, "+++ " fmt, ## args) +#else +#define DPRINTF(fmt, args...) +#endif + +#if PY_MAJOR_VERSION > 2 +static char * +bytes_to_c_string(PyObject* o, int lock) { +/* Convert a bytes object to a char *. Optionally lock the buffer if it is a + bytes array. + This code swiped directly from Python 3.1's posixmodule.c by Yours Truly. + The name there is bytes2str(). +*/ + if (PyBytes_Check(o)) + return PyBytes_AsString(o); + else if (PyByteArray_Check(o)) { + if (lock && PyObject_GetBuffer(o, NULL, 0) < 0) + /* On a bytearray, this should not fail. */ + PyErr_BadInternalCall(); + return PyByteArray_AsString(o); + } else { + /* The FS converter should have verified that this + is either bytes or bytearray. */ + Py_FatalError("bad object passed to bytes2str"); + /* not reached. */ + return ""; + } +} + +static void +release_bytes(PyObject* o) + /* Release the lock, decref the object. + This code swiped directly from Python 3.1's posixmodule.c by Yours Truly. + */ +{ + if (PyByteArray_Check(o)) + o->ob_type->tp_as_buffer->bf_releasebuffer(NULL, 0); + Py_DECREF(o); +} +#endif + + +static int +random_in_range(int min, int max) { + // returns a random int N such that min <= N <= max + int diff = (max - min) + 1; + + // ref: http://www.c-faq.com/lib/randrange.html + return ((int)((double)rand() / ((double)RAND_MAX + 1) * diff)) + min; +} + + +static +int create_random_name(char *name) { + // The random name is always lowercase so that this code will work + // on case-insensitive file systems. It always starts with a forward + // slash. + int length; + char *alphabet = "abcdefghijklmnopqrstuvwxyz"; + int i; + + // Generate a random length for the name. I subtract 1 from the + // MAX_SAFE_NAME_LENGTH in order to allow for the name's leading "/". 
+ length = random_in_range(6, MAX_SAFE_NAME_LENGTH - 1); + + name[0] = '/'; + name[length] = '\0'; + i = length; + while (--i) + name[i] = alphabet[random_in_range(0, 25)]; + + return length; +} + + +static int +convert_name_param(PyObject *py_name_param, void *checked_name) { + /* Verifies that the py_name_param is either None or a string. + If it's a string, checked_name->name points to a PyMalloc-ed buffer + holding a NULL-terminated C version of the string when this function + concludes. The caller is responsible for releasing the buffer. + */ + int rc = 0; + NoneableName *p_name = (NoneableName *)checked_name; +#if PY_MAJOR_VERSION > 2 + PyObject *py_name_as_bytes = NULL; + char *p_name_as_c_string = NULL; +#endif + + p_name->is_none = 0; + + // The name can be None or a Python string + if (py_name_param == Py_None) { + DPRINTF("name is None\n"); + rc = 1; + p_name->is_none = 1; + } +#if PY_MAJOR_VERSION > 2 + else if (PyUnicode_Check(py_name_param)) { + // The caller passed me a Unicode string; I need a char *. Getting + // from one to the other takes a couple steps. + + // PyUnicode_FSConverter() converts the Unicode object into a + // bytes or a bytearray object. (Why can't it be one or the other?!?) + PyUnicode_FSConverter(py_name_param, &py_name_as_bytes); + + // bytes_to_c_string() returns a pointer to the buffer. + p_name_as_c_string = bytes_to_c_string(py_name_as_bytes, 0); + + // PyMalloc memory and copy the user-supplied name to it. + p_name->name = (char *)PyMem_Malloc(strlen(p_name_as_c_string) + 1); + if (p_name->name) { + rc = 1; + strcpy(p_name->name, p_name_as_c_string); + } + else + PyErr_SetString(PyExc_MemoryError, "Out of memory"); + + // The bytes version of the name isn't useful to me, and per the + // documentation for PyUnicode_FSConverter(), I am responsible for + // releasing it when I'm done. + release_bytes(py_name_as_bytes); + } +#else + else if (PyString_Check(py_name_param)) { + // PyMalloc memory and copy the user-supplied name to it. + p_name->name = (char *)PyMem_Malloc(PyString_Size(py_name_param) + 1); + if (p_name->name) { + rc = 1; + strcpy(p_name->name, PyString_AsString(py_name_param)); + } + else + PyErr_SetString(PyExc_MemoryError, "Out of memory"); + } +#endif + else + PyErr_SetString(PyExc_TypeError, "Name must be None or a string"); + + return rc; +} + + +static +int convert_timeout(PyObject *py_timeout, void *converted_timeout) { + // Converts a PyObject into a timeout if possible. The PyObject should + // be None or some sort of numeric value (e.g. int, float, etc.) + // converted_timeout should point to a NoneableTimeout. When this function + // returns, if the NoneableTimeout's is_none is true, then the rest of the + // struct is undefined. Otherwise, the rest of the struct is populated. + int rc = 0; + double simple_timeout = 0; + struct timeval current_time; + NoneableTimeout *p_timeout = (NoneableTimeout *)converted_timeout; + + // The timeout can be None or any Python numeric type (float, + // int, long). + if (py_timeout == Py_None) + rc = 1; + else if (PyFloat_Check(py_timeout)) { + rc = 1; + simple_timeout = PyFloat_AsDouble(py_timeout); + } +#if PY_MAJOR_VERSION < 3 + else if (PyInt_Check(py_timeout)) { + rc = 1; + simple_timeout = (double)PyInt_AsLong(py_timeout); + } +#endif + else if (PyLong_Check(py_timeout)) { + rc = 1; + simple_timeout = (double)PyLong_AsLong(py_timeout); + } + + // The timeout may not be negative. 
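    // A negative value is rejected just below. For a valid, non-None
    // timeout, the code that follows adds the current gettimeofday() time
    // to the caller's relative value so that 'timestamp' holds an absolute
    // deadline of the kind sem_timedwait() expects.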
+ if ((rc) && (simple_timeout < 0)) + rc = 0; + + if (!rc) + PyErr_SetString(PyExc_TypeError, + "The timeout must be None or a non-negative number"); + else { + if (py_timeout == Py_None) + p_timeout->is_none = 1; + else { + p_timeout->is_none = 0; + + p_timeout->is_zero = (!simple_timeout); + + gettimeofday(¤t_time, NULL); + + simple_timeout += current_time.tv_sec; + simple_timeout += (float)current_time.tv_usec / 1e6; + + p_timeout->timestamp.tv_sec = (time_t)floor(simple_timeout); + p_timeout->timestamp.tv_nsec = (long)((simple_timeout - floor(simple_timeout)) * ONE_BILLION); + } + } + + return rc; +} + +static PyObject * +generic_str(char *name) { +#if PY_MAJOR_VERSION > 2 + return PyUnicode_FromString(name ? name : "(no name)"); +#else + return PyString_FromString(name ? name : "(no name)"); +#endif +} + +static void +mode_to_str(long mode, char *mode_str) { + // Given a numeric mode and preallocated string space, populates the + // string with the mode formatted as an octal number. + sprintf(mode_str, "0%o", (int)mode); +} + + +static int test_semaphore_validity(Semaphore *p) { + // Returns 1 (true) if the Semaphore object refers to a valid + // semaphore, 0 (false) otherwise. In the latter case, it sets the + // Python exception info and the caller should immediately return NULL. + // The false condition should not arise unless the user of the module + // tries to use a Semaphore after it's been closed. + int valid = 1; + + if (!p->pSemaphore) { + valid = 0; + PyErr_SetString(pExistentialException, "The semaphore has been closed"); + } + + return valid; +} + +/* ===== Semaphore implementation functions ===== */ + +static PyObject * +sem_str(Semaphore *self) { + return generic_str(self->name); +} + + +static PyObject * +sem_repr(Semaphore *self) { + char mode[32]; + + mode_to_str(self->mode, mode); + +#if PY_MAJOR_VERSION > 2 + return PyUnicode_FromFormat("posix_ipc.Semaphore(\"%s\", mode=%s)", + self->name, mode); +#else + return PyString_FromFormat("posix_ipc.Semaphore(\"%s\", mode=%s)", + self->name, mode); +#endif +} + + +static PyObject * +my_sem_unlink(const char *name) { + DPRINTF("unlinking sem name %s\n", name); + if (-1 == sem_unlink(name)) { + switch (errno) { + case EACCES: + PyErr_SetString(pPermissionsException, + "Denied permission to unlink this semaphore"); + break; + + case ENOENT: + case EINVAL: + PyErr_SetString(pExistentialException, + "No semaphore exists with the specified name"); + break; + + case ENAMETOOLONG: + PyErr_SetString(PyExc_ValueError, "The name is too long"); + break; + + default: + PyErr_SetFromErrno(PyExc_OSError); + break; + } + goto error_return; + } + + Py_RETURN_NONE; + + error_return: + return NULL; +} + + +static void +Semaphore_dealloc(Semaphore *self) { + /* Note -- I make no attempt to close the semaphore because that + kills access to the semaphore for every thread in this process, + which would make multi-threaded programming difficult. 
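+
+       Callers who want to release this process's handle on the semaphore
+       explicitly can call close() on the object; unlink() removes the name
+       from the system. Here I only free the Python-side resources.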
+ */ + DPRINTF("dealloc\n"); + PyMem_Free(self->name); + self->name = NULL; + + Py_TYPE(self)->tp_free((PyObject*)self); +} + + +static PyObject * +Semaphore_new(PyTypeObject *type, PyObject *args, PyObject *kwlist) { + Semaphore *self; + + self = (Semaphore *)type->tp_alloc(type, 0); + + return (PyObject *)self; +} + + +static int +Semaphore_init(Semaphore *self, PyObject *args, PyObject *keywords) { + NoneableName name; + char temp_name[MAX_SAFE_NAME_LENGTH + 1]; + unsigned int initial_value = 0; + int flags = 0; + static char *keyword_list[ ] = {"name", "flags", "mode", "initial_value", NULL}; + + // First things first -- initialize the self struct. + self->pSemaphore = NULL; + self->name = NULL; + self->mode = 0600; + + // Semaphore(name, [flags = 0, [mode = 0600, [initial_value = 0]]]) + + if (!PyArg_ParseTupleAndKeywords(args, keywords, "O&|iiI", keyword_list, + &convert_name_param, &name, &flags, + &(self->mode), &initial_value)) + goto error_return; + + + if ( !(flags & O_CREAT) && (flags & O_EXCL) ) { + PyErr_SetString(PyExc_ValueError, + "O_EXCL must be combined with O_CREAT"); + goto error_return; + } + + if (name.is_none && ((flags & O_EXCL) != O_EXCL)) { + PyErr_SetString(PyExc_ValueError, + "Name can only be None if O_EXCL is set"); + goto error_return; + } + + if (name.is_none) { + // (name == None) ==> generate a name for the caller + do { + errno = 0; + create_random_name(temp_name); + + DPRINTF("Calling sem_open, name=%s, flags=0x%x, mode=0%o, initial value=%d\n", + temp_name, flags, (int)self->mode, initial_value); + self->pSemaphore = sem_open(temp_name, flags, (mode_t)self->mode, + initial_value); + + } while ( (SEM_FAILED == self->pSemaphore) && (EEXIST == errno) ); + + // PyMalloc memory and copy the randomly-generated name to it. + self->name = (char *)PyMem_Malloc(strlen(temp_name) + 1); + if (self->name) + strcpy(self->name, temp_name); + else { + PyErr_SetString(PyExc_MemoryError, "Out of memory"); + goto error_return; + } + } + else { + // (name != None) ==> use name supplied by the caller. It was + // already converted to C by convert_name_param(). 
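+        // Illustrative Python-level usage of this branch (the name and
+        // values are assumptions, not requirements):
+        //
+        //     sem = posix_ipc.Semaphore("/my_sem", posix_ipc.O_CREX,
+        //                               initial_value=1)
+        //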
+ self->name = name.name; + + DPRINTF("Calling sem_open, name=%s, flags=0x%x, mode=0%o, initial value=%d\n", + self->name, flags, (int)self->mode, initial_value); + self->pSemaphore = sem_open(self->name, flags, (mode_t)self->mode, + initial_value); + } + + DPRINTF("pSemaphore == %p\n", self->pSemaphore); + + if (self->pSemaphore == SEM_FAILED) { + self->pSemaphore = NULL; + + switch (errno) { + case EACCES: + PyErr_SetString(pPermissionsException, + "Permission denied"); + break; + + case EEXIST: + PyErr_SetString(pExistentialException, + "A semaphore with the specified name already exists"); + break; + + case ENOENT: + PyErr_SetString(pExistentialException, + "No semaphore exists with the specified name"); + break; + + case EINVAL: + PyErr_SetString(PyExc_ValueError, "Invalid parameter(s)"); + break; + + case EMFILE: + PyErr_SetString(PyExc_OSError, + "This process already has the maximum number of files open"); + break; + + case ENFILE: + PyErr_SetString(PyExc_OSError, + "The system limit on the total number of open files has been reached"); + break; + + case ENAMETOOLONG: + PyErr_SetString(PyExc_ValueError, "The name is too long"); + break; + + case ENOMEM: + PyErr_SetString(PyExc_MemoryError, "Not enough memory"); + break; + + default: + PyErr_SetFromErrno(PyExc_OSError); + break; + } + + goto error_return; + } + // else + // all is well, nothing to do + + return 0; + + error_return: + return -1; +} + + +static PyObject * +Semaphore_release(Semaphore *self) { + if (!test_semaphore_validity(self)) + goto error_return; + + if (-1 == sem_post(self->pSemaphore)) { + switch (errno) { + case EINVAL: + case EBADF: + PyErr_SetString(pExistentialException, + "The semaphore does not exist"); + break; + + default: + PyErr_SetFromErrno(PyExc_OSError); + break; + } + goto error_return; + } + + Py_RETURN_NONE; + + error_return: + return NULL; +} + + +static PyObject * +Semaphore_acquire(Semaphore *self, PyObject *args, PyObject *keywords) { + NoneableTimeout timeout; + int rc = 0; + + if (!test_semaphore_validity(self)) + goto error_return; + + // Initialize this to the default of None. + timeout.is_none = 1; + + // acquire([timeout=None]) + + if (!PyArg_ParseTuple(args, "|O&", convert_timeout, &timeout)) + goto error_return; + + Py_BEGIN_ALLOW_THREADS + // timeout == None: no timeout, i.e. wait forever. + // timeout == 0: raise an error if a wait would occur. + // timeout > 0: wait no longer than t seconds before raising an error. + if (timeout.is_none) { + DPRINTF("calling sem_wait()\n"); + rc = sem_wait(self->pSemaphore); + } + else { + // Timeout is not None (i.e. is numeric) + // A simple_timeout of zero implies the same behavior as + // sem_trywait() so I call that instead. Doing so makes it easier + // to ensure this code behaves consistently regardless of whether + // or not sem_timedwait() is available. + if (timeout.is_zero) { + DPRINTF("calling sem_trywait()\n"); + rc = sem_trywait(self->pSemaphore); + } + else { + // timeout is not None and is > 0.0 + // sem_timedwait isn't available on all systems. Where it's not + // available I call sem_wait() instead. 
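+                // Note that where sem_timedwait() is unavailable, a positive
+                // timeout therefore degrades to an indefinite wait rather
+                // than raising BusyError when the timeout expires.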
+#ifdef SEM_TIMEDWAIT_EXISTS + DPRINTF("calling sem_timedwait()\n"); + DPRINTF("timeout tv_sec = %ld; timeout tv_nsec = %ld\n", + timeout.timestamp.tv_sec, timeout.timestamp.tv_nsec); + + rc = sem_timedwait(self->pSemaphore, &(timeout.timestamp)); +#else + DPRINTF("calling sem_wait()\n"); + rc = sem_wait(self->pSemaphore); +#endif + } + } + Py_END_ALLOW_THREADS + + if (-1 == rc) { + DPRINTF("sem_wait() rc = %d, errno = %d\n", rc, errno); + + switch (errno) { + case EBADF: + case EINVAL: + // Linux documentation says that EINVAL has two meanings -- + // 1) self->pSemaphore no longer points to a valid semaphore + // 2) timeout is < 0 or > one billion. + // Since my code above guards against out-of-range + // timeout values, I expect the second condition is + // impossible here. + PyErr_SetString(pExistentialException, + "The semaphore does not exist"); + break; + + case EINTR: + /* If the signal was generated by Ctrl-C, calling + PyErr_CheckSignals() here has the side effect of setting + Python's error indicator. Otherwise there's a good chance + it won't be set. + http://groups.google.com/group/comp.lang.python/browse_thread/thread/ada39e984dfc3da6/fd6becbdce91a6be?#fd6becbdce91a6be + */ + PyErr_CheckSignals(); + + if (!(PyErr_Occurred() && + PyErr_ExceptionMatches(PyExc_KeyboardInterrupt)) + ) { + PyErr_Clear(); + PyErr_SetString(pSignalException, + "The wait was interrupted by a signal"); + } + // else + // If KeyboardInterrupt error is set, I propogate that + // up to the caller. + break; + + case EAGAIN: + case ETIMEDOUT: + PyErr_SetString(pBusyException, + "Semaphore is busy"); + break; + + default: + PyErr_SetFromErrno(PyExc_OSError); + break; + } + + goto error_return; + } + + Py_RETURN_NONE; + + error_return: + return NULL; +} + + +// sem_getvalue isn't available on all systems. +#ifdef SEM_GETVALUE_EXISTS +static PyObject * +Semaphore_getvalue(Semaphore *self, void *closure) { + int value; + + if (!test_semaphore_validity(self)) + goto error_return; + + if (-1 == sem_getvalue(self->pSemaphore, &value)) { + switch (errno) { + case EINVAL: + PyErr_SetString(pExistentialException, + "The semaphore does not exist"); + break; + + default: + PyErr_SetFromErrno(PyExc_OSError); + break; + } + goto error_return; + } + + return Py_BuildValue("i", value); + + error_return: + return NULL; +} +// end #ifdef SEM_GETVALUE_EXISTS +#endif + + +static PyObject * +Semaphore_unlink(Semaphore *self) { + if (!test_semaphore_validity(self)) + goto error_return; + + return my_sem_unlink(self->name); + + error_return: + return NULL; +} + + +static PyObject * +Semaphore_close(Semaphore *self) { + if (!test_semaphore_validity(self)) + goto error_return; + + if (-1 == sem_close(self->pSemaphore)) { + switch (errno) { + case EINVAL: + PyErr_SetString(pExistentialException, + "The semaphore does not exist"); + break; + + default: + PyErr_SetFromErrno(PyExc_OSError); + break; + } + goto error_return; + } + else + self->pSemaphore = NULL; + + Py_RETURN_NONE; + + error_return: + return NULL; +} + + +static PyObject * +Semaphore_enter(Semaphore *self) { + PyObject *args = PyTuple_New(0); + PyObject *retval = NULL; + + if (Semaphore_acquire(self, args, NULL)) { + retval = (PyObject *)self; + Py_INCREF(self); + } + /* else acquisition failed for some reason so just fall through to + the return statement below and return NULL. Semaphore_acquire() has + already called PyErr_SetString() to set the relevant error. 
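+
+       On success, self is returned (with an extra reference) so that a
+       Semaphore works as a context manager, pairing with Semaphore_exit()
+       below. Illustrative Python-level usage, assuming "/my_sem" already
+       exists:
+
+           with posix_ipc.Semaphore("/my_sem") as sem:
+               pass   # the semaphore is held inside this block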
+ */ + + Py_DECREF(args); + + return retval; +} + + +static PyObject * +Semaphore_exit(Semaphore *self, PyObject *args) { + DPRINTF("exiting context and releasing semaphore %s\n", self->name); + return Semaphore_release(self); +} + +/* ===== End Semaphore functions ===== */ + + + + +/* ===== Begin Shared Memory implementation functions ===== */ + +static PyObject * +shm_str(SharedMemory *self) { + return generic_str(self->name); +} + +static PyObject * +shm_repr(SharedMemory *self) { + char mode[32]; + + mode_to_str(self->mode, mode); + +#if PY_MAJOR_VERSION > 2 + return PyUnicode_FromFormat("posix_ipc.SharedMemory(\"%s\", mode=%s)", + self->name, mode); +#else + return PyString_FromFormat("posix_ipc.SharedMemory(\"%s\", mode=%s)", + self->name, mode); +#endif +} + +static PyObject * +my_shm_unlink(const char *name) { + DPRINTF("unlinking shm name %s\n", name); + if (-1 == shm_unlink(name)) { + switch (errno) { + case EACCES: + PyErr_SetString(pPermissionsException, "Permission denied"); + break; + + case ENOENT: + PyErr_SetString(pExistentialException, + "No shared memory exists with the specified name"); + break; + + case ENAMETOOLONG: + PyErr_SetString(PyExc_ValueError, "The name is too long"); + break; + + default: + PyErr_SetFromErrno(PyExc_OSError); + break; + } + + goto error_return; + } + + Py_RETURN_NONE; + + error_return: + return NULL; +} + + +static PyObject * +SharedMemory_new(PyTypeObject *type, PyObject *args, PyObject *kwlist) { + SharedMemory *self; + + self = (SharedMemory *)type->tp_alloc(type, 0); + + return (PyObject *)self; +} + + +static int +SharedMemory_init(SharedMemory *self, PyObject *args, PyObject *keywords) { + NoneableName name; + char temp_name[MAX_SAFE_NAME_LENGTH + 1]; + unsigned int flags = 0; + unsigned long size = 0; + int read_only = 0; + static char *keyword_list[ ] = {"name", "flags", "mode", "size", "read_only", NULL}; + + // First things first -- initialize the self struct. + self->name = NULL; + self->fd = 0; + self->mode = 0600; + + if (!PyArg_ParseTupleAndKeywords(args, keywords, "O&|Iiki", keyword_list, + &convert_name_param, &name, &flags, + &(self->mode), &size, &read_only)) + goto error_return; + + if ( !(flags & O_CREAT) && (flags & O_EXCL) ) { + PyErr_SetString(PyExc_ValueError, + "O_EXCL must be combined with O_CREAT"); + goto error_return; + } + + if (name.is_none && ((flags & O_EXCL) != O_EXCL)) { + PyErr_SetString(PyExc_ValueError, + "Name can only be None if O_EXCL is set"); + goto error_return; + } + + flags |= (read_only ? O_RDONLY : O_RDWR); + + if (name.is_none) { + // (name == None) ==> generate a name for the caller + do { + errno = 0; + create_random_name(temp_name); + + DPRINTF("calling shm_open, name=%s, flags=0x%x, mode=0%o\n", + temp_name, flags, (int)self->mode); + self->fd = shm_open(temp_name, flags, (mode_t)self->mode); + + } while ( (-1 == self->fd) && (EEXIST == errno) ); + + // PyMalloc memory and copy the randomly-generated name to it. + self->name = (char *)PyMem_Malloc(strlen(temp_name) + 1); + if (self->name) + strcpy(self->name, temp_name); + else { + PyErr_SetString(PyExc_MemoryError, "Out of memory"); + goto error_return; + } + } + else { + // (name != None) ==> use name supplied by the caller. It was + // already converted to C by convert_name_param(). 
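+        // Illustrative Python-level usage of this branch (names and sizes
+        // are assumptions; requires the standard library mmap module):
+        //
+        //     shm = posix_ipc.SharedMemory("/my_shm", posix_ipc.O_CREX,
+        //                                  size=4096)
+        //     mapped = mmap.mmap(shm.fd, shm.size)
+        //     shm.close_fd()   # the mapping remains valid after the fd closes
+        //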
+ self->name = name.name; + + DPRINTF("calling shm_open, name=%s, flags=0x%x, mode=0%o\n", + self->name, flags, (int)self->mode); + self->fd = shm_open(self->name, flags, (mode_t)self->mode); + } + + DPRINTF("shm fd = %d\n", self->fd); + + if (-1 == self->fd) { + self->fd = 0; + switch (errno) { + case EACCES: + PyErr_Format(pPermissionsException, + "No permission to %s this segment", + (flags & O_TRUNC) ? "truncate" : "access" + ); + break; + + case EEXIST: + PyErr_SetString(pExistentialException, + "Shared memory with the specified name already exists"); + break; + + case ENOENT: + PyErr_SetString(pExistentialException, + "No shared memory exists with the specified name"); + break; + + case EINVAL: + PyErr_SetString(PyExc_ValueError, "Invalid parameter(s)"); + break; + + case EMFILE: + PyErr_SetString(PyExc_OSError, + "This process already has the maximum number of files open"); + break; + + case ENFILE: + PyErr_SetString(PyExc_OSError, + "The system limit on the total number of open files has been reached"); + break; + + case ENAMETOOLONG: + PyErr_SetString(PyExc_ValueError, + "The name is too long"); + break; + + default: + PyErr_SetFromErrno(PyExc_OSError); + break; + } + + goto error_return; + } + else { + if (size) { + DPRINTF("calling ftruncate, fd = %d, size = %ld\n", self->fd, size); + if (-1 == ftruncate(self->fd, (off_t)size)) { + // The code below will raise a Python error. Since that error + // is raised during __init__(), it will look to the caller + // as if object creation failed entirely. Here I clean up + // the system object I just created. + close(self->fd); + shm_unlink(self->name); + + // ftruncate can return a ton of different errors, but most + // are not relevant or are extremely unlikely. + switch (errno) { + case EINVAL: + PyErr_SetString(PyExc_ValueError, + "The size is invalid or the memory is read-only"); + break; + + case EFBIG: + PyErr_SetString(PyExc_ValueError, + "The size is too large"); + break; + + case EROFS: + case EACCES: + PyErr_SetString(pPermissionsException, + "The memory is read-only"); + break; + + default: + PyErr_SetFromErrno(PyExc_OSError); + break; + } + + goto error_return; + } + } + } + + return 0; + + error_return: + return -1; +} + + +static void SharedMemory_dealloc(SharedMemory *self) { + DPRINTF("dealloc\n"); + PyMem_Free(self->name); + self->name = NULL; + + Py_TYPE(self)->tp_free((PyObject*)self); +} + + +PyObject * +SharedMemory_getsize(SharedMemory *self, void *closure) { + struct stat fileinfo; + off_t size = -1; + + if (0 == fstat(self->fd, &fileinfo)) + size = fileinfo.st_size; + else { + switch (errno) { + case EBADF: + case EINVAL: + PyErr_SetString(pExistentialException, + "The segment does not exist"); + break; + + default: + PyErr_SetFromErrno(PyExc_OSError); + break; + } + + goto error_return; + } + + return Py_BuildValue("k", (unsigned long)size); + + error_return: + return NULL; +} + + +PyObject * +SharedMemory_close_fd(SharedMemory *self) { + if (self->fd) { + if (-1 == close(self->fd)) { + switch (errno) { + case EBADF: + PyErr_SetString(PyExc_ValueError, + "The file descriptor is invalid"); + break; + + default: + PyErr_SetFromErrno(PyExc_OSError); + break; + } + + goto error_return; + } + } + + Py_RETURN_NONE; + + error_return: + return NULL; +} + + +PyObject * +SharedMemory_unlink(SharedMemory *self) { + return my_shm_unlink(self->name); +} + + +/* ===== End Shared Memory functions ===== */ + + +/* ===== Begin Message Queue implementation functions ===== */ + +#ifdef MESSAGE_QUEUE_SUPPORT_EXISTS + +static 
PyObject * +mq_str(MessageQueue *self) { + return generic_str(self->name); +} + +static PyObject * +mq_repr(MessageQueue *self) { + char mode[32]; + char read[32]; + char write[32]; + + strcpy(read, self->receive_permitted ? "True" : "False"); + strcpy(write, self->send_permitted ? "True" : "False"); + mode_to_str(self->mode, mode); + +#if PY_MAJOR_VERSION > 2 + return PyUnicode_FromFormat("posix_ipc.MessageQueue(\"%s\", mode=%s, max_message_size=%ld, max_messages=%ld, read=%s, write=%s)", + self->name, mode, self->max_message_size, self->max_messages, + read, write); +#else + return PyString_FromFormat("posix_ipc.MessageQueue(\"%s\", mode=%s, max_message_size=%ld, max_messages=%ld, read=%s, write=%s)", + self->name, mode, self->max_message_size, self->max_messages, + read, write); +#endif +} + + +void +mq_cancel_notification(MessageQueue *self) { + // Based on the documentation, mq_notify() can only fail in this context + // if mqd is invalid. That will only occur if the queue has been + // destroyed, in which case notifications are effectively cancelled + // anyway. Therefore I don't care about the return code from mq_notify() + // and this function is always successful. + + // I hope this doesn't come back to bite me... + int rc; + + rc = mq_notify(self->mqd, NULL); + DPRINTF("Notification cancelled, rc=%d\n", rc); + + Py_XDECREF(self->notification_callback); + self->notification_callback = NULL; + Py_XDECREF(self->notification_callback_param); + self->notification_callback_param = NULL; +} + + +static PyObject * +my_mq_unlink(const char *name) { + DPRINTF("unlinking mq name %s\n", name); + if (-1 == mq_unlink(name)) { + switch (errno) { + case EACCES: + PyErr_SetString(pPermissionsException, + "Permission denied"); + break; + + case ENOENT: + case EINVAL: + PyErr_SetString(pExistentialException, + "No queue exists with the specified name"); + break; + + case ENAMETOOLONG: + PyErr_SetString(PyExc_ValueError, "The name is too long"); + break; + + default: + PyErr_SetFromErrno(PyExc_OSError); + break; + } + goto error_return; + } + + Py_RETURN_NONE; + + error_return: + return NULL; +} + + +static int +mq_get_attrs(mqd_t mqd, struct mq_attr *attr) { + attr->mq_flags = 0; + attr->mq_maxmsg = 0; + attr->mq_msgsize = 0; + attr->mq_curmsgs = 0; + + if (-1 == mq_getattr(mqd, attr)) { + switch (errno) { + case EBADF: + case EINVAL: + PyErr_SetString(pExistentialException, + "The queue does not exist"); + break; + + default: + PyErr_SetFromErrno(PyExc_OSError); + break; + } + + goto error_return; + } + + return 0; + + error_return: + return -1; +} + + +static PyObject * +MessageQueue_new(PyTypeObject *type, PyObject *args, PyObject *kwlist) { + MessageQueue *self; + + self = (MessageQueue *)type->tp_alloc(type, 0); + + return (PyObject *)self; +} + + +static int +MessageQueue_init(MessageQueue *self, PyObject *args, PyObject *keywords) { + NoneableName name; + char temp_name[MAX_SAFE_NAME_LENGTH + 1]; + unsigned int flags = 0; + long max_messages = QUEUE_MESSAGES_MAX_DEFAULT; + long max_message_size = QUEUE_MESSAGE_SIZE_MAX_DEFAULT; + PyObject *py_read = NULL; + PyObject *py_write = NULL; + struct mq_attr attr; + static char *keyword_list[ ] = {"name", "flags", "mode", "max_messages", + "max_message_size", "read", "write", NULL}; + + // First things first -- initialize the self struct. 
+ self->mqd = (mqd_t)0; + self->name = NULL; + self->mode = 0600; + self->notification_callback = NULL; + self->notification_callback_param = NULL; + + // MessageQueue(name, flags = 0, mode=0600, + // max_messages=QUEUE_MESSAGES_MAX_DEFAULT, + // max_message_size=QUEUE_MESSAGE_SIZE_MAX_DEFAULT, + // read = True, write = True) + + if (!PyArg_ParseTupleAndKeywords(args, keywords, "O&|IillOO", keyword_list, + &convert_name_param, &name, &flags, + &(self->mode), &max_messages, + &max_message_size, &py_read, &py_write)) + goto error_return; + + if ( !(flags & O_CREAT) && (flags & O_EXCL) ) { + PyErr_SetString(PyExc_ValueError, + "O_EXCL must be combined with O_CREAT"); + goto error_return; + } + + if (name.is_none && ((flags & O_EXCL) != O_EXCL)) { + PyErr_SetString(PyExc_ValueError, + "Name can only be None if O_EXCL is set"); + goto error_return; + } + + // read & write flags default to True, so if the user passed True I + // set the object pointers to their default values of NULL. So here + // NULL means True and any other value means False. Sorry for being + // backwards. + if (py_read && PyObject_IsTrue(py_read)) py_read = NULL; + + if (py_write && PyObject_IsTrue(py_write)) py_write = NULL; + + if ((!py_read) && (!py_write)) { + flags |= O_RDWR; + self->send_permitted = 1; + self->receive_permitted = 1; + } + + if ((!py_read) && (py_write)) { + flags |= O_RDONLY; + self->send_permitted = 0; + self->receive_permitted = 1; + } + + if ((py_read) && (!py_write)) { + flags |= O_WRONLY; + self->send_permitted = 1; + self->receive_permitted = 0; + } + + if ((py_read) && (py_write)) { + PyErr_SetString(PyExc_ValueError, "At least one of read or write must be True"); + goto error_return; + } + + // Params look OK, let's try to open/create the queue + if (flags & O_CREAT) { + // Set up the attr struct which is only needed when creating. + attr.mq_flags = (flags & O_NONBLOCK) ? O_NONBLOCK : 0; + attr.mq_maxmsg = max_messages; + attr.mq_msgsize = max_message_size; + attr.mq_curmsgs = 0; + } + + if (name.is_none) { + // (name == None) ==> generate a name for the caller + do { + errno = 0; + create_random_name(temp_name); + + DPRINTF("calling mq_open, name=%s, flags=0x%x, mode=0%o, maxmsg=%ld, msgsize=%ld\n", + temp_name, flags, (int)self->mode, attr.mq_maxmsg, attr.mq_msgsize); + self->mqd = mq_open(temp_name, flags, (mode_t)self->mode, &attr); + + } while ( ((mqd_t)-1 == self->mqd) && (EEXIST == errno) ); + + // PyMalloc memory and copy the randomly-generated name to it. + self->name = (char *)PyMem_Malloc(strlen(temp_name) + 1); + if (self->name) + strcpy(self->name, temp_name); + else { + PyErr_SetString(PyExc_MemoryError, "Out of memory"); + goto error_return; + } + } + else { + // (name != None) ==> use name supplied by the caller. It was + // already converted to C by convert_name_param(). 
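+        // Illustrative Python-level usage of this branch (the queue name and
+        // message are assumptions):
+        //
+        //     mq = posix_ipc.MessageQueue("/my_q", posix_ipc.O_CREAT)
+        //     mq.send(b"hello", priority=1)
+        //     message, priority = mq.receive()
+        //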
+ self->name = name.name; + + if (flags & O_CREAT) { + DPRINTF("calling mq_open, name=%s, flags=0x%x, mode=0%o, maxmsg=%ld, msgsize=%ld\n", + self->name, flags, (int)self->mode, attr.mq_maxmsg, attr.mq_msgsize); + self->mqd = mq_open(self->name, flags, (mode_t)self->mode, &attr); + } + else { + DPRINTF("calling mq_open, name=%s, flags=0x%x\n", self->name, flags); + self->mqd = mq_open(self->name, flags); + } + } + + DPRINTF("mqd = %ld\n", (long)self->mqd); + + if ((mqd_t)-1 == self->mqd) { + self->mqd = (mqd_t)0; + switch (errno) { + case EINVAL: + PyErr_SetString(PyExc_ValueError, "Invalid parameter(s)"); + break; + + case ENOSPC: + PyErr_SetString(PyExc_OSError, + "Insufficient space for a new queue"); + break; + + case EACCES: + PyErr_SetString(pPermissionsException, "Permission denied"); + break; + + case EEXIST: + PyErr_SetString(pExistentialException, + "A queue with the specified name already exists"); + break; + + case ENOENT: + PyErr_SetString(pExistentialException, + "No queue exists with the specified name"); + break; + + case EMFILE: + PyErr_SetString(PyExc_OSError, + "This process already has the maximum number of files open"); + break; + + case ENFILE: + PyErr_SetString(PyExc_OSError, + "The system limit on the total number of open files has been reached"); + break; + + case ENAMETOOLONG: + PyErr_SetString(PyExc_ValueError, "The name is too long"); + break; + + case ENOMEM: + PyErr_SetString(PyExc_MemoryError, "Not enough memory"); + break; + + default: + PyErr_SetFromErrno(PyExc_OSError); + break; + } + + goto error_return; + } + + // self->mqd and self->name are already populated. Here's where I get + // the other two values. + if (0 == mq_get_attrs(self->mqd, &attr)) { + self->max_messages = attr.mq_maxmsg; + self->max_message_size = attr.mq_msgsize; + } + else { + // Oy vey, something has gone very wrong. The call to mq_open() + // succeeded but mq_getattr() failed?!? + PyErr_Clear(); + PyErr_SetString(pBaseException, "Unable to initialize object"); + goto error_return; + } + + // Last but not least, get a reference to the interpreter state. I only + // need this if the caller requests queue notifications that occur in + // a new thread, so much of the time this goes unused. + // It's my understanding that there's only one interpreter state to go + // around, so no matter which thread I get the interpreter state from, + // it will be the same interpreter state. + self->interpreter = PyThreadState_Get()->interp; + + return 0; + + error_return: + return -1; +} + + +static void +MessageQueue_dealloc(MessageQueue *self) { + DPRINTF("dealloc\n"); + PyMem_Free(self->name); + self->name = NULL; + + Py_XDECREF(self->notification_callback); + self->notification_callback = NULL; + Py_XDECREF(self->notification_callback_param); + self->notification_callback_param = NULL; + + Py_TYPE(self)->tp_free((PyObject*)self); +} + + +static PyObject * +MessageQueue_send(MessageQueue *self, PyObject *args, PyObject *keywords) { + NoneableTimeout timeout; + long priority = 0; + int rc = 0; + static char *keyword_list[ ] = {"message", "timeout", "priority", NULL}; +#if PY_MAJOR_VERSION > 2 + static char args_format[] = "s*|O&l"; + Py_buffer msg; +#else + static char args_format[] = "s#|O&l"; + typedef struct { + char *buf; + unsigned long len; + } MyBuffer; + MyBuffer msg; +#endif + + // Initialize this to the default of None. + timeout.is_none = 1; + + /* In Python >= 2.5, the Python argument specifier 's#' expects a + py_ssize_t for its second parameter (msg.len). 
A ulong is long + enough to fit a py_ssize_t. + It might be too big, though, on platforms where a ulong is larger than + py_ssize_t. Therefore I *must* initialize it to 0 so that whatever + Python doesn't write to is zeroed out. + */ + msg.len = 0; + + if (!PyArg_ParseTupleAndKeywords(args, keywords, args_format, keyword_list, +#if PY_MAJOR_VERSION > 2 + &msg, +#else + &(msg.buf), &(msg.len), +#endif + convert_timeout, &timeout, + &priority)) + goto error_return; + + if (!self->send_permitted) { + PyErr_SetString(pPermissionsException, "The queue is not open for writing"); + goto error_return; + } + + if (msg.len > self->max_message_size) { + PyErr_Format(PyExc_ValueError, + "The message must be no longer than %ld bytes", + self->max_message_size); + } + + if ((priority < 0) || (priority > QUEUE_PRIORITY_MAX)) { + PyErr_Format(PyExc_ValueError, + "The priority must be a positive number no greater than QUEUE_PRIORITY_MAX (%u)", + QUEUE_PRIORITY_MAX); + goto error_return; + } + + Py_BEGIN_ALLOW_THREADS + // timeout == None: no timeout, i.e. wait forever. + // timeout >= 0: wait no longer than t seconds before raising an error. + if (timeout.is_none) { + DPRINTF("calling mq_send(), mqd=%ld, msg len=%ld, priority=%ld\n", + (long)self->mqd, (long)msg.len, priority); + rc = mq_send(self->mqd, msg.buf, msg.len, (unsigned int)priority); + } + else { + // Timeout is not None (i.e. is numeric) + DPRINTF("calling mq_timedsend(), mqd=%ld, msg len=%ld, priority=%ld\n", + (long)self->mqd, (long)msg.len, priority); + DPRINTF("timeout tv_sec = %ld; timeout tv_nsec = %ld\n", + timeout.timestamp.tv_sec, timeout.timestamp.tv_nsec); + + rc = mq_timedsend(self->mqd, msg.buf, msg.len, (unsigned int)priority, + &(timeout.timestamp)); + } + Py_END_ALLOW_THREADS + + if (-1 == rc) { + switch (errno) { + case EBADF: + case EINVAL: + // The POSIX spec & Linux doc say that EINVAL can mean -- + // 1) self->mqd is not valid for writing + // 2) timeout is < 0 or > one billion. + // Since my code above guards against out-of-range + // params, I expect only the first condition. + PyErr_SetString(pExistentialException, + "The message queue does not exist or is not open for writing"); + break; + + case EINTR: + /* If the signal was generated by Ctrl-C, calling + PyErr_CheckSignals() here has the side effect of setting + Python's error indicator. Otherwise there's a good chance + it won't be set. + http://groups.google.com/group/comp.lang.python/browse_thread/thread/ada39e984dfc3da6/fd6becbdce91a6be?#fd6becbdce91a6be + */ + PyErr_CheckSignals(); + + if (!(PyErr_Occurred() && + PyErr_ExceptionMatches(PyExc_KeyboardInterrupt)) + ) { + PyErr_Clear(); + PyErr_SetString(pSignalException, + "The wait was interrupted by a signal"); + } + // else + // If KeyboardInterrupt error is set, I propogate that + // up to the caller. + break; + + case EAGAIN: + case ETIMEDOUT: + PyErr_SetString(pBusyException, "The queue is full"); + break; + + case EMSGSIZE: + // This should never happen since I checked message length + // above, but who knows... 
+ PyErr_SetString(PyExc_ValueError, "The message is too long"); + break; + + default: + PyErr_SetFromErrno(PyExc_OSError); + break; + } + + goto error_return; + } + +#if PY_MAJOR_VERSION > 2 + PyBuffer_Release(&msg); +#endif + + Py_RETURN_NONE; + + error_return: +#if PY_MAJOR_VERSION > 2 + PyBuffer_Release(&msg); +#endif + return NULL; +} + + +static PyObject * +MessageQueue_receive(MessageQueue *self, PyObject *args, PyObject *keywords) { + NoneableTimeout timeout; + char *msg = NULL; + unsigned int priority = 0; + ssize_t size = 0; + PyObject *py_return_tuple = NULL; + + // Initialize this to the default of None. + timeout.is_none = 1; + + if (!PyArg_ParseTuple(args, "|O&", convert_timeout, &timeout)) + goto error_return; + + if (!self->receive_permitted) { + PyErr_SetString(pPermissionsException, "The queue is not open for reading"); + goto error_return; + } + + msg = (char *)malloc(self->max_message_size); + + if (!msg) { + PyErr_SetString(PyExc_MemoryError, "Out of memory"); + goto error_return; + } + + Py_BEGIN_ALLOW_THREADS + // timeout == None: no timeout, i.e. wait forever. + // timeout >= 0: wait no longer than t seconds before raising an error. + if (timeout.is_none) { + DPRINTF("Calling mq_receive(), mqd=%ld; msg buffer length = %ld\n", + (long)self->mqd, self->max_message_size); + size = mq_receive(self->mqd, msg, self->max_message_size, &priority); + } + else { + // Timeout is not None (i.e. is numeric) + DPRINTF("Calling mq_timedreceive(), mqd=%ld; msg buffer length = %ld\n", + (long)self->mqd, self->max_message_size); + DPRINTF("timeout tv_sec = %ld; timeout tv_nsec = %ld\n", + timeout.timestamp.tv_sec, + timeout.timestamp.tv_nsec); + + size = mq_timedreceive(self->mqd, msg, self->max_message_size, + &priority, &(timeout.timestamp)); + } + Py_END_ALLOW_THREADS + + if (-1 == size) { + switch (errno) { + case EBADF: + case EINVAL: + // The POSIX spec & Linux doc say that EINVAL has three + // meanings -- + // 1) self->mqd is not open for reading + // 2) timeout is < 0 or > one billion. + // 3) msg len is out of range. + // Since my code above guards against out-of-range + // params, I expect only the first condition. + PyErr_SetString(pExistentialException, + "The message queue does not exist or is not open for reading"); + break; + + case EINTR: + /* If the signal was generated by Ctrl-C, calling + PyErr_CheckSignals() here has the side effect of setting + Python's error indicator. Otherwise there's a good chance + it won't be set. + http://groups.google.com/group/comp.lang.python/browse_thread/thread/ada39e984dfc3da6/fd6becbdce91a6be?#fd6becbdce91a6be + */ + PyErr_CheckSignals(); + + if (!(PyErr_Occurred() && + PyErr_ExceptionMatches(PyExc_KeyboardInterrupt)) + ) { + PyErr_Clear(); + PyErr_SetString(pSignalException, + "The wait was interrupted by a signal"); + } + // else + // If KeyboardInterrupt error is set, I propogate that + // up to the caller. 
+ break; + + case EAGAIN: + case ETIMEDOUT: + PyErr_SetString(pBusyException, "The queue is empty"); + break; + + default: + PyErr_SetFromErrno(PyExc_OSError); + break; + } + + goto error_return; + } + + py_return_tuple = Py_BuildValue("NN", +#if PY_MAJOR_VERSION > 2 + PyBytes_FromStringAndSize(msg, size), + PyLong_FromLong((long)priority) +#else + PyString_FromStringAndSize(msg, size), + PyInt_FromLong((long)priority) +#endif + ); + + free(msg); + + return py_return_tuple; + + error_return: + free(msg); + + return NULL; +} + + +static PyObject * +MessageQueue_unlink(MessageQueue *self) { + return my_mq_unlink(self->name); +} + + +void dprint_current_thread_id(void) { + // Debug print only. Note that calling PyThreadState_Get() when there's + // no current thread is a fatal error, so calling this can crash your + // app. + DPRINTF("Current thread has id %lu\n", PyThreadState_Get()->thread_id); +} + + +void process_notification(union sigval notification_data) { + /* Invoked by the system in a new thread as notification of a message + arriving in the queue. */ + PyObject *py_args; + PyObject *py_result; + MessageQueue *self = notification_data.sival_ptr; + PyObject *callback_function = NULL; + PyObject *callback_param = NULL; + PyGILState_STATE gstate; + + DPRINTF("C thread %lu invoked, calling PyGILState_Ensure()\n", pthread_self()); + + gstate = PyGILState_Ensure(); + + /* Notifications are one-offs; the caller must re-register if he wants + more. Therefore I must discard my pointers to the callback function + and param after the callback is complete. + + But the caller may want to re-request notification during the callback. + If he does so, MessageQueue_request_notification() will be invoked + and self->notification_callback and ->notification_callback_param + will get overwritten. Therefore I need to make copies of them here + under the assumption that my self-> pointers won't survive the + callback and DECREF them after the callback is complete. + */ + callback_function = self->notification_callback; + callback_param = self->notification_callback_param; + self->notification_callback = NULL; + self->notification_callback_param = NULL; + + // Perform the callback. + DPRINTF("Performing the callback...\n"); + py_args = Py_BuildValue("(O)", callback_param); + py_result = PyObject_CallObject(callback_function, py_args); + Py_DECREF(py_args); + + // If py_result is NULL, the call failed. However, I want to return + // control to the main thread before I raise an error, so I deal with + // py_result later. + + DPRINTF("Done calling\n"); + + // Now I can clean these up safely. + Py_XDECREF(callback_function); + Py_XDECREF(callback_param); + + if (!py_result) { + DPRINTF("Invoking the callback failed\n"); + // FIXME - setting the error indicator here doesn't seem to + // propogate up to the main thread, so I can't figure out how to + // squawk if the callback fails. + //PyErr_SetString(pBaseException, "Invoking the callback failed"); + } + + Py_XDECREF(py_result); + + /* Release the thread. No Python API allowed beyond this point. 
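+
+       (Note: the callback above ran in a thread created by the system for
+       SIGEV_THREAD notification, not by Python's threading module, which is
+       why the GIL had to be acquired and released explicitly in this
+       function.)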
*/ + DPRINTF("Calling PyGILState_Release()\n"); + PyGILState_Release(gstate); + + DPRINTF("exiting thread\n"); +}; + + +static PyObject * +MessageQueue_request_notification(MessageQueue *self, PyObject *args) { + struct sigevent notification; + PyObject *py_callback = NULL; + PyObject *py_callback_param = NULL; + PyObject *py_notification = Py_None; + int param_is_ok = 1; + + // request_notification(notification = None) + + if (!PyArg_ParseTuple(args, "|O", &py_notification)) + goto error_return; + + // py_notification can be None ==> cancel, an int ==> signal, + // or a tuple of (callback function, param) + if (py_notification == Py_None) { + notification.sigev_notify = SIGEV_NONE; + } +#if PY_MAJOR_VERSION > 2 + else if (PyLong_Check(py_notification)) +#else + else if (PyInt_Check(py_notification)) +#endif + { + notification.sigev_notify = SIGEV_SIGNAL; +#if PY_MAJOR_VERSION > 2 + notification.sigev_signo = (int)PyLong_AsLong(py_notification); +#else + notification.sigev_signo = (int)PyInt_AsLong(py_notification); +#endif + } + else if (PyTuple_Check(py_notification)) { + notification.sigev_notify = SIGEV_THREAD; + + if (2 == PyTuple_Size(py_notification)) { + py_callback = PyTuple_GetItem(py_notification, 0); + py_callback_param = PyTuple_GetItem(py_notification, 1); + + if (!PyCallable_Check(py_callback)) + param_is_ok = 0; + } + else + param_is_ok = 0; + } + else + param_is_ok = 0; + + if (!param_is_ok) { + PyErr_SetString(PyExc_ValueError, + "The notification must be None, an integer, or a tuple of (function, parameter)"); + goto error_return; + } + + // At this point the param is either None, in which case I want to + // cancel any existing notification request, or it is requesting + // signal or thread notification, in which case I also want to cancel + // any existing notification request. + mq_cancel_notification(self); + + if (SIGEV_THREAD == notification.sigev_notify) { + // I have to do a bit more work before calling mq_notify(). + + // Store the new callback & param in self + Py_INCREF(py_callback); + Py_INCREF(py_callback_param); + self->notification_callback = py_callback; + self->notification_callback_param = py_callback_param; + + // Set up notification struct for passing to mq_notify() + notification.sigev_value.sival_ptr = self; + notification.sigev_notify_function = process_notification; + notification.sigev_notify_attributes = NULL; + + // When notification occurs, it will be in a (new) C thread. In that + // thread I'll create a Python thread but beforehand, threads must be + // initialized. + if (!PyEval_ThreadsInitialized()) { + DPRINTF("calling PyEval_InitThreads()\n"); + PyEval_InitThreads(); + } + + dprint_current_thread_id(); + } + + if (SIGEV_NONE != notification.sigev_notify) { + // request notification + if (-1 == mq_notify(self->mqd, ¬ification)) { + switch (errno) { + case EBUSY: + PyErr_SetString(pBusyException, + "The queue is already delivering notifications elsewhere"); + break; + + default: + PyErr_SetFromErrno(PyExc_OSError); + break; + } + + // If setting up the notification failed, there's no reason to + // hang on to these references. 
+ Py_XDECREF(self->notification_callback); + self->notification_callback = NULL; + Py_XDECREF(self->notification_callback_param); + self->notification_callback_param = NULL; + + goto error_return; + } + } + + DPRINTF("exiting MessageQueue_request_notification()\n"); + + Py_RETURN_NONE; + + error_return: + return NULL; +} + + +static PyObject * +MessageQueue_close(MessageQueue *self) { + if (-1 == mq_close(self->mqd)) { + switch (errno) { + case EINVAL: + case EBADF: + PyErr_SetString(pExistentialException, + "The queue does not exist"); + break; + + default: + PyErr_SetFromErrno(PyExc_OSError); + break; + } + goto error_return; + } + else + self->mqd = 0; + + Py_RETURN_NONE; + + error_return: + return NULL; +} + + +PyObject * +MessageQueue_get_mqd(MessageQueue *self) { + // This is a little awkward because an mqd is a void * under Solaris + // and an int under Linux. I cast it and hope for the best. :-/ +#if PY_MAJOR_VERSION > 2 + return PyLong_FromLong((long)self->mqd); +#else + if ( ((long)self->mqd > PY_INT_MAX) || ((long)self->mqd < (0 - PY_INT_MAX)) ) + return PyLong_FromLong((long)self->mqd); + else + return PyInt_FromLong((long)self->mqd); +#endif +} + + +PyObject * +MessageQueue_get_block(MessageQueue *self) { + struct mq_attr attr; + + if (-1 == mq_get_attrs(self->mqd, &attr)) + return NULL; + else { + if (attr.mq_flags & O_NONBLOCK) + Py_RETURN_FALSE; + else + Py_RETURN_TRUE; + } +} + + +static int +MessageQueue_set_block(MessageQueue *self, PyObject *value) { + struct mq_attr attr; + + attr.mq_flags = PyObject_IsTrue(value) ? 0 : O_NONBLOCK; + + if (-1 == mq_setattr(self->mqd, &attr, NULL)) { + switch (errno) { + case EBADF: + PyErr_SetString(pExistentialException, + "The queue does not exist"); + break; + + default: + PyErr_SetFromErrno(PyExc_OSError); + break; + } + + goto error_return; + } + + return 0; + + error_return: + return -1; +} + +PyObject * +MessageQueue_get_current_messages(MessageQueue *self) { + struct mq_attr attr; + + if (-1 == mq_get_attrs(self->mqd, &attr)) + return NULL; + else + return Py_BuildValue("k", (unsigned long)attr.mq_curmsgs); +} + +// end of #ifdef MESSAGE_QUEUE_SUPPORT_EXISTS +#endif + + +/* ===== End Message Queue implementation functions ===== */ + + + + +/* + * + * Semaphore meta stuff for describing myself to Python + * + */ + +static PyMemberDef Semaphore_members[] = { + { "name", + T_STRING, + offsetof(Semaphore, name), + READONLY, + "The name specified in the constructor" + }, + { "mode", + T_LONG, + offsetof(Semaphore, mode), + READONLY, + "The mode specified in the constructor" + }, + {NULL} /* Sentinel */ +}; + + +static PyMethodDef Semaphore_methods[] = { + { "__enter__", + (PyCFunction)Semaphore_enter, + METH_NOARGS, + }, + { "__exit__", + (PyCFunction)Semaphore_exit, + METH_VARARGS, + }, + { "acquire", + (PyCFunction)Semaphore_acquire, + METH_VARARGS, + "Acquire (grab) the semaphore, waiting if necessary" + }, + { "release", + (PyCFunction)Semaphore_release, + METH_NOARGS, + "Release the semaphore" + }, + { "close", + (PyCFunction)Semaphore_close, + METH_NOARGS, + "Close the semaphore for this process." + }, + { "unlink", + (PyCFunction)Semaphore_unlink, + METH_NOARGS, + "Unlink (remove) the semaphore." 
+ }, + {NULL, NULL, 0, NULL} /* Sentinel */ +}; + + +static PyGetSetDef Semaphore_getseters[] = { +#ifdef SEM_GETVALUE_EXISTS + {"value", (getter)Semaphore_getvalue, (setter)NULL, "value", NULL}, +#endif + {NULL} /* Sentinel */ +}; + + +static PyTypeObject SemaphoreType = { + PyVarObject_HEAD_INIT(NULL, 0) + "posix_ipc.Semaphore", // tp_name + sizeof(Semaphore), // tp_basicsize + 0, // tp_itemsize + (destructor) Semaphore_dealloc, // tp_dealloc + 0, // tp_print + 0, // tp_getattr + 0, // tp_setattr + 0, // tp_compare + (reprfunc) sem_repr, // tp_repr + 0, // tp_as_number + 0, // tp_as_sequence + 0, // tp_as_mapping + 0, // tp_hash + 0, // tp_call + (reprfunc) sem_str, // tp_str + 0, // tp_getattro + 0, // tp_setattro + 0, // tp_as_buffer + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, + // tp_flags + "POSIX semaphore object", // tp_doc + 0, // tp_traverse + 0, // tp_clear + 0, // tp_richcompare + 0, // tp_weaklistoffset + 0, // tp_iter + 0, // tp_iternext + Semaphore_methods, // tp_methods + Semaphore_members, // tp_members + Semaphore_getseters, // tp_getset + 0, // tp_base + 0, // tp_dict + 0, // tp_descr_get + 0, // tp_descr_set + 0, // tp_dictoffset + (initproc) Semaphore_init, // tp_init + 0, // tp_alloc + (newfunc) Semaphore_new, // tp_new + 0, // tp_free + 0, // tp_is_gc + 0 // tp_bases +}; + + +/* + * + * Shared memory meta stuff for describing myself to Python + * + */ + + +static PyMemberDef SharedMemory_members[] = { + { "name", + T_STRING, + offsetof(SharedMemory, name), + READONLY, + "The name specified in the constructor" + }, + { "fd", + T_INT, + offsetof(SharedMemory, fd), + READONLY, + "Shared memory segment file descriptor" + }, + { "mode", + T_LONG, + offsetof(SharedMemory, mode), + READONLY, + "The mode specified in the constructor" + }, + {NULL} /* Sentinel */ +}; + + +static PyMethodDef SharedMemory_methods[] = { + { "close_fd", + (PyCFunction)SharedMemory_close_fd, + METH_NOARGS, + "Closes the file descriptor associated with the shared memory." + }, + { "unlink", + (PyCFunction)SharedMemory_unlink, + METH_NOARGS, + "Unlink (remove) the shared memory." 
+ }, + {NULL, NULL, 0, NULL} /* Sentinel */ +}; + + +static PyGetSetDef SharedMemory_getseters[] = { + // size is read-only + { "size", + (getter)SharedMemory_getsize, + (setter)NULL, + "size", + NULL + }, + {NULL} /* Sentinel */ +}; + + +static PyTypeObject SharedMemoryType = { + PyVarObject_HEAD_INIT(NULL, 0) + "posix_ipc.SharedMemory", // tp_name + sizeof(SharedMemory), // tp_basicsize + 0, // tp_itemsize + (destructor) SharedMemory_dealloc, // tp_dealloc + 0, // tp_print + 0, // tp_getattr + 0, // tp_setattr + 0, // tp_compare + (reprfunc) shm_repr, // tp_repr + 0, // tp_as_number + 0, // tp_as_sequence + 0, // tp_as_mapping + 0, // tp_hash + 0, // tp_call + (reprfunc) shm_str, // tp_str + 0, // tp_getattro + 0, // tp_setattro + 0, // tp_as_buffer + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, + // tp_flags + "POSIX shared memory object", // tp_doc + 0, // tp_traverse + 0, // tp_clear + 0, // tp_richcompare + 0, // tp_weaklistoffset + 0, // tp_iter + 0, // tp_iternext + SharedMemory_methods, // tp_methods + SharedMemory_members, // tp_members + SharedMemory_getseters, // tp_getset + 0, // tp_base + 0, // tp_dict + 0, // tp_descr_get + 0, // tp_descr_set + 0, // tp_dictoffset + (initproc) SharedMemory_init, // tp_init + 0, // tp_alloc + (newfunc) SharedMemory_new, // tp_new + 0, // tp_free + 0, // tp_is_gc + 0 // tp_bases +}; + + +/* + * + * Message queue meta stuff for describing myself to Python + * + */ + +#ifdef MESSAGE_QUEUE_SUPPORT_EXISTS + +static PyMemberDef MessageQueue_members[] = { + { "name", + T_STRING, + offsetof(MessageQueue, name), + READONLY, + "The name specified in the constructor" + }, + { "max_messages", + T_LONG, + offsetof(MessageQueue, max_messages), + READONLY, + "Queue slots" + }, + { "max_message_size", + T_LONG, + offsetof(MessageQueue, max_message_size), + READONLY, + "Maximum number of bytes per message" + }, + { "mode", + T_LONG, + offsetof(MessageQueue, mode), + READONLY, + "The mode specified in the constructor" + }, + {NULL} /* Sentinel */ +}; + + +static PyMethodDef MessageQueue_methods[] = { + { "send", + (PyCFunction)MessageQueue_send, + METH_VARARGS | METH_KEYWORDS, + "Send a message via the queue" + }, + { "receive", + (PyCFunction)MessageQueue_receive, + METH_VARARGS, + "Receive a message from the queue" + }, + { "close", + (PyCFunction)MessageQueue_close, + METH_NOARGS, + "Close the queue's descriptor" + }, + { "unlink", + (PyCFunction)MessageQueue_unlink, + METH_NOARGS, + "Unlink the queue" + }, + { "request_notification", + (PyCFunction)MessageQueue_request_notification, + METH_VARARGS, + "Request notification of the queue becoming non-empty" + }, + + {NULL, NULL, 0, NULL} /* Sentinel */ +}; + + +static PyGetSetDef MessageQueue_getseters[] = { + { "block", + (getter)MessageQueue_get_block, + (setter)MessageQueue_set_block, + "block", + NULL + }, + { "mqd", + (getter)MessageQueue_get_mqd, + (setter)NULL, + "Message queue descriptor", + NULL + }, + { "current_messages", + (getter)MessageQueue_get_current_messages, + (setter)NULL, + "current_message_count", + NULL + }, + {NULL} /* Sentinel */ +}; + +static PyTypeObject MessageQueueType = { + PyVarObject_HEAD_INIT(NULL, 0) + "posix_ipc.MessageQueue", // tp_name + sizeof(MessageQueue), // tp_basicsize + 0, // tp_itemsize + (destructor) MessageQueue_dealloc, // tp_dealloc + 0, // tp_print + 0, // tp_getattr + 0, // tp_setattr + 0, // tp_compare + (reprfunc) mq_repr, // tp_repr + 0, // tp_as_number + 0, // tp_as_sequence + 0, // tp_as_mapping + 0, // tp_hash + 0, // tp_call + (reprfunc) mq_str, // tp_str 
+ 0, // tp_getattro + 0, // tp_setattro + 0, // tp_as_buffer + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, + // tp_flags + "POSIX message queue object", // tp_doc + 0, // tp_traverse + 0, // tp_clear + 0, // tp_richcompare + 0, // tp_weaklistoffset + 0, // tp_iter + 0, // tp_iternext + MessageQueue_methods, // tp_methods + MessageQueue_members, // tp_members + MessageQueue_getseters, // tp_getset + 0, // tp_base + 0, // tp_dict + 0, // tp_descr_get + 0, // tp_descr_set + 0, // tp_dictoffset + (initproc) MessageQueue_init, // tp_init + 0, // tp_alloc + (newfunc) MessageQueue_new, // tp_new + 0, // tp_free + 0, // tp_is_gc + 0 // tp_bases +}; + +// end of #ifdef MESSAGE_QUEUE_SUPPORT_EXISTS +#endif + + +/* + * + * Module-level functions & meta stuff + * + */ + +static PyObject * +posix_ipc_unlink_semaphore(PyObject *self, PyObject *args) { + const char *name; + + if (!PyArg_ParseTuple(args, "s", &name)) + return NULL; + else + return my_sem_unlink(name); +} + + +static PyObject * +posix_ipc_unlink_shared_memory(PyObject *self, PyObject *args) { + const char *name; + + if (!PyArg_ParseTuple(args, "s", &name)) + return NULL; + else + return my_shm_unlink(name); +} + + +#ifdef MESSAGE_QUEUE_SUPPORT_EXISTS +static PyObject * +posix_ipc_unlink_message_queue(PyObject *self, PyObject *args) { + const char *name; + + if (!PyArg_ParseTuple(args, "s", &name)) + return NULL; + else + return my_mq_unlink(name); +} +#endif + + +static PyMethodDef module_methods[ ] = { + { "unlink_semaphore", + (PyCFunction)posix_ipc_unlink_semaphore, + METH_VARARGS, + "Unlink a semaphore" + }, + { "unlink_shared_memory", + (PyCFunction)posix_ipc_unlink_shared_memory, + METH_VARARGS, + "Unlink shared memory" + }, +#ifdef MESSAGE_QUEUE_SUPPORT_EXISTS + { "unlink_message_queue", + (PyCFunction)posix_ipc_unlink_message_queue, + METH_VARARGS, + "Unlink a message queue" + }, +#endif + {NULL} /* Sentinel */ +}; + + +#if PY_MAJOR_VERSION > 2 +static struct PyModuleDef this_module = { + PyModuleDef_HEAD_INIT, // m_base + "posix_ipc", // m_name + "POSIX IPC module", // m_doc + -1, // m_size (space allocated for module globals) + module_methods, // m_methods + NULL, // m_reload + NULL, // m_traverse + NULL, // m_clear + NULL // m_free +}; +#endif + +/* Module init function */ +#if PY_MAJOR_VERSION > 2 +#define POSIX_IPC_INIT_FUNCTION_NAME PyInit_posix_ipc +#else +#define POSIX_IPC_INIT_FUNCTION_NAME initposix_ipc +#endif + +/* Module init function */ +PyMODINIT_FUNC +POSIX_IPC_INIT_FUNCTION_NAME(void) { + PyObject *module; + PyObject *module_dict; + + // I call this in case I'm asked to create any random names. 
+ srand((unsigned int)time(NULL)); + +#if PY_MAJOR_VERSION > 2 + module = PyModule_Create(&this_module); +#else + module = Py_InitModule3("posix_ipc", module_methods, "POSIX IPC module"); +#endif + + if (!module) + goto error_return; + + if (PyType_Ready(&SemaphoreType) < 0) + goto error_return; + + if (PyType_Ready(&SharedMemoryType) < 0) + goto error_return; + +#ifdef MESSAGE_QUEUE_SUPPORT_EXISTS + if (PyType_Ready(&MessageQueueType) < 0) + goto error_return; +#endif + + Py_INCREF(&SemaphoreType); + PyModule_AddObject(module, "Semaphore", (PyObject *)&SemaphoreType); + + Py_INCREF(&SharedMemoryType); + PyModule_AddObject(module, "SharedMemory", (PyObject *)&SharedMemoryType); + +#ifdef MESSAGE_QUEUE_SUPPORT_EXISTS + Py_INCREF(&MessageQueueType); + PyModule_AddObject(module, "MessageQueue", (PyObject *)&MessageQueueType); +#endif + + + PyModule_AddStringConstant(module, "VERSION", POSIX_IPC_VERSION); + PyModule_AddStringConstant(module, "__version__", POSIX_IPC_VERSION); + PyModule_AddStringConstant(module, "__copyright__", "Copyright 2012 Philip Semanchuk"); + PyModule_AddStringConstant(module, "__author__", "Philip Semanchuk"); + PyModule_AddStringConstant(module, "__license__", "BSD"); + + PyModule_AddIntConstant(module, "O_CREAT", O_CREAT); + PyModule_AddIntConstant(module, "O_EXCL", O_EXCL); + PyModule_AddIntConstant(module, "O_CREX", O_CREAT | O_EXCL); + PyModule_AddIntConstant(module, "O_TRUNC", O_TRUNC); +#ifdef MESSAGE_QUEUE_SUPPORT_EXISTS + Py_INCREF(Py_True); + PyModule_AddObject(module, "MESSAGE_QUEUES_SUPPORTED", Py_True); + PyModule_AddIntConstant(module, "O_RDONLY", O_RDONLY); + PyModule_AddIntConstant(module, "O_WRONLY", O_WRONLY); + PyModule_AddIntConstant(module, "O_RDWR", O_RDWR); + PyModule_AddIntConstant(module, "O_NONBLOCK", O_NONBLOCK); + PyModule_AddIntConstant(module, "QUEUE_MESSAGES_MAX_DEFAULT", QUEUE_MESSAGES_MAX_DEFAULT); + PyModule_AddIntConstant(module, "QUEUE_MESSAGE_SIZE_MAX_DEFAULT", QUEUE_MESSAGE_SIZE_MAX_DEFAULT); + PyModule_AddIntConstant(module, "QUEUE_PRIORITY_MAX", QUEUE_PRIORITY_MAX); + PyModule_AddIntConstant(module, "USER_SIGNAL_MIN", SIGRTMIN); + PyModule_AddIntConstant(module, "USER_SIGNAL_MAX", SIGRTMAX); +#else + Py_INCREF(Py_False); + PyModule_AddObject(module, "MESSAGE_QUEUES_SUPPORTED", Py_False); +#endif + + PyModule_AddIntConstant(module, "PAGE_SIZE", PAGE_SIZE); + + PyModule_AddIntConstant(module, "SEMAPHORE_VALUE_MAX", SEM_VALUE_MAX); + +#ifdef SEM_TIMEDWAIT_EXISTS + Py_INCREF(Py_True); + PyModule_AddObject(module, "SEMAPHORE_TIMEOUT_SUPPORTED", Py_True); +#else + Py_INCREF(Py_False); + PyModule_AddObject(module, "SEMAPHORE_TIMEOUT_SUPPORTED", Py_False); +#endif + +#ifdef SEM_GETVALUE_EXISTS + Py_INCREF(Py_True); + PyModule_AddObject(module, "SEMAPHORE_VALUE_SUPPORTED", Py_True); +#else + Py_INCREF(Py_False); + PyModule_AddObject(module, "SEMAPHORE_VALUE_SUPPORTED", Py_False); +#endif + + if (!(module_dict = PyModule_GetDict(module))) + goto error_return; + + // Exceptions + if (!(pBaseException = PyErr_NewException("posix_ipc.Error", NULL, NULL))) + goto error_return; + else + PyDict_SetItemString(module_dict, "Error", pBaseException); + + if (!(pSignalException = PyErr_NewException("posix_ipc.SignalError", pBaseException, NULL))) + goto error_return; + else + PyDict_SetItemString(module_dict, "SignalError", pSignalException); + + if (!(pPermissionsException = PyErr_NewException("posix_ipc.PermissionsError", pBaseException, NULL))) + goto error_return; + else + PyDict_SetItemString(module_dict, "PermissionsError", 
pPermissionsException); + + if (!(pExistentialException = PyErr_NewException("posix_ipc.ExistentialError", pBaseException, NULL))) + goto error_return; + else + PyDict_SetItemString(module_dict, "ExistentialError", pExistentialException); + + if (!(pBusyException = PyErr_NewException("posix_ipc.BusyError", pBaseException, NULL))) + goto error_return; + else + PyDict_SetItemString(module_dict, "BusyError", pBusyException); + +#if PY_MAJOR_VERSION > 2 + return module; +#endif + + error_return: +#if PY_MAJOR_VERSION > 2 + return NULL; +#else + ; // Nothing to do +#endif +} diff --git a/prober.py b/prober.py new file mode 100644 index 0000000..4bdcc72 --- /dev/null +++ b/prober.py @@ -0,0 +1,427 @@ +import subprocess +import platform +import os +import sys + +# Set these to None for compile/link debugging or subprocess.PIPE to silence +# compiler warnings and errors. +STDOUT = subprocess.PIPE +STDERR = subprocess.PIPE +# STDOUT = None +# STDERR = None + +# This is the max length that I want a printed line to be. +MAX_LINE_LENGTH = 78 + +PY_MAJOR_VERSION = sys.version_info[0] + +def line_wrap_paragraph(s): + # Format s with terminal-friendly line wraps. + done = False + beginning = 0 + end = MAX_LINE_LENGTH - 1 + lines = [ ] + while not done: + if end >= len(s): + done = True + lines.append(s[beginning:]) + else: + last_space = s[beginning:end].rfind(' ') + + lines.append(s[beginning:beginning + last_space]) + beginning += (last_space + 1) + end = beginning + MAX_LINE_LENGTH - 1 + + return lines + + +def print_bad_news(value_name, default): + s = "Setup can't determine %s on your system, so it will default to %s which may not be correct." \ + % (value_name, default) + plea = "Please report this message and your operating system info to the package maintainer listed in the README file." + + lines = line_wrap_paragraph(s) + [''] + line_wrap_paragraph(plea) + + border = '*' * MAX_LINE_LENGTH + + s = border + "\n* " + ('\n* '.join(lines)) + '\n' + border + + print (s) + + +def does_build_succeed(filename, linker_options = ""): + # Utility function that returns True if the file compiles and links + # successfully, False otherwise. + # Two things to note here -- + # - If there's a linker option like -lrt, it needs to come *after* + # the specification of the C file or linking will fail on Ubuntu 11.10 + # (maybe because of the gcc version?) + # - Some versions of Linux place the sem_xxx() functions in libpthread. + # Rather than testing whether or not it's needed, I just specify it + # everywhere since it's harmless to specify it when it's not needed. + cmd = "cc -Wall -o ./prober/foo ./prober/%s %s -lpthread" % (filename, linker_options) + + p = subprocess.Popen(cmd, shell=True, stdout=STDOUT, stderr=STDERR) + + # p.wait() returns the process' return code, so 0 implies that + # the compile & link succeeded. + return not bool(p.wait()) + + +def compile_and_run(filename, linker_options = ""): + # Utility function that returns the stdout output from running the + # compiled source file; None if the compile fails. + cmd = "cc -Wall -o ./prober/foo %s ./prober/%s" % (linker_options, filename) + + p = subprocess.Popen(cmd, shell=True, stdout=STDOUT, stderr=STDERR) + + if p.wait(): + # uh-oh, compile failed + return None + else: + s = subprocess.Popen(["./prober/foo"], + stdout=subprocess.PIPE).communicate()[0] + return s.strip().decode() + + +def get_sysctl_value(name): + """Given a sysctl name (e.g. 
'kern.mqueue.maxmsg'), returns sysctl's value
+    for that variable or None if the sysctl call fails (unknown name, not
+    a BSD-ish system, etc.)
+
+    Only makes sense on systems that implement sysctl (BSD derivatives).
+    """
+    s = None
+    try:
+        # I redirect stderr to /dev/null because if sysctl is available but
+        # doesn't know about the particular item I'm querying, it will
+        # kvetch with a message like 'second level name mqueue in
+        # kern.mqueue.maxmsg is invalid'. This always happens under OS X
+        # (which doesn't have any kern.mqueue values) and under FreeBSD when
+        # the mqueuefs kernel module isn't loaded.
+        s = subprocess.Popen(["sysctl", "-n", name],
+                             stdout=subprocess.PIPE,
+                             stderr=open(os.devnull, 'w')).communicate()[0]
+        s = s.strip().decode()
+    except:
+        pass
+
+    return s
+
+
+def sniff_realtime_lib():
+    rc = None
+    filename = "sniff_realtime_lib.c"
+
+    if does_build_succeed(filename):
+        # Realtime libs not needed
+        rc = False
+    else:
+        # cc failed when not linked to realtime libs; let's try again
+        # with the realtime libs involved and see if things go better.
+        if does_build_succeed(filename, "-lrt"):
+            # Realtime libs are needed
+            rc = True
+
+    if rc is None:
+        # Unable to determine whether or not I needed the realtime libs.
+        # That's bad! Print a warning, set the return code to False
+        # and hope for the best.
+        rc = False
+        print_bad_news("if it needs to link to the realtime libraries", "'no'")
+
+    return rc
+
+
+def sniff_sem_getvalue(linker_options):
+    return does_build_succeed("sniff_sem_getvalue.c", linker_options)
+
+
+def sniff_sem_timedwait(linker_options):
+    return does_build_succeed("sniff_sem_timedwait.c", linker_options)
+
+
+def sniff_sem_value_max():
+    # The default is to return None, which means that it is #defined in a
+    # standard header file and doesn't need to be added to my custom header
+    # file.
+    sem_value_max = None
+
+    if not does_build_succeed("sniff_sem_value_max.c"):
+        # OpenSolaris 2008.05 doesn't #define SEM_VALUE_MAX. (This may
+        # be true elsewhere too.) Ask sysconf() instead if it exists.
+        # Note that os.sysconf_names doesn't exist under Cygwin.
+        if hasattr(os, "sysconf_names") and \
+           ("SC_SEM_VALUE_MAX" in os.sysconf_names):
+            sem_value_max = os.sysconf("SC_SEM_VALUE_MAX")
+        else:
+            # This value of last resort should be #defined everywhere. What
+            # could possibly go wrong?
+            sem_value_max = "_POSIX_SEM_VALUE_MAX"
+
+    return sem_value_max
+
+
+def sniff_page_size():
+    DEFAULT_PAGE_SIZE = 4096
+
+    # Linker options don't matter here because I'm not calling any
+    # functions, just getting the value of a #define.
+    page_size = compile_and_run("sniff_page_size.c")
+
+    if page_size is None:
+        page_size = DEFAULT_PAGE_SIZE
+        print_bad_news("the value of PAGE_SIZE", page_size)
+
+    return page_size
+
+
+def sniff_mq_existence(linker_options):
+    return does_build_succeed("sniff_mq_existence.c", linker_options)
+
+
+def sniff_mq_prio_max():
+    # MQ_PRIO_MAX is #defined in limits.h on all of the systems that I
+    # checked that support message queues at all. (I checked 2 Linux boxes,
+    # OpenSolaris and FreeBSD 8.0.)
+
+    # 32 = minimum allowable max priority per POSIX; systems are permitted
+    # to define a larger value.
+    # ref: http://www.opengroup.org/onlinepubs/009695399/basedefs/limits.h.html
+    DEFAULT_PRIORITY_MAX = 32
+
+    max_priority = None
+    # OS X up to and including 10.8 doesn't support POSIX message queues and
+    # doesn't define MQ_PRIO_MAX. Maybe this aggravation will cease in 10.9?
+    if does_build_succeed("sniff_mq_prio_max.c"):
+        max_priority = compile_and_run("sniff_mq_prio_max.c")
+
+    if max_priority:
+        try:
+            max_priority = int(max_priority)
+        except ValueError:
+            max_priority = None
+
+    if max_priority is None:
+        # Looking for a #define didn't work; ask sysconf() instead.
+        # Note that os.sysconf_names doesn't exist under Cygwin.
+        if hasattr(os, "sysconf_names") and \
+           ("SC_MQ_PRIO_MAX" in os.sysconf_names):
+            max_priority = os.sysconf("SC_MQ_PRIO_MAX")
+        else:
+            max_priority = DEFAULT_PRIORITY_MAX
+            print_bad_news("the value of PRIORITY_MAX", max_priority)
+
+    # Under OS X, os.sysconf("SC_MQ_PRIO_MAX") returns -1.
+    if max_priority < 0:
+        max_priority = DEFAULT_PRIORITY_MAX
+
+    # Adjust for the fact that these are 0-based values; i.e. permitted
+    # priorities range from 0 - (MQ_PRIO_MAX - 1). So why not just make
+    # the #define one smaller? Because this one goes up to eleven...
+    max_priority -= 1
+
+    # priority is an unsigned int
+    return str(max_priority).strip() + "U"
+
+
+def sniff_mq_max_messages():
+    # This value is not defined by POSIX.
+
+    # On most systems I've tested, msg Qs are implemented via mmap-ed files
+    # or a similar interface, so the only theoretical limits are imposed by
+    # the file system. In practice, Linux and *BSD impose some fairly tight
+    # limits.
+
+    # On Linux it's available in a /proc file and often defaults to the wimpy
+    # value of 10.
+
+    # On FreeBSD (and other BSDs, I assume), it's available via sysctl as
+    # kern.mqueue.maxmsg. On my FreeBSD 9.1 test system, it defaults to 100.
+
+    # mqueue.h defines mq_attr.mq_maxmsg as a C long, so that's
+    # a practical limit for this value.
+
+    # ref: http://linux.die.net/man/7/mq_overview
+    # ref: http://www.freebsd.org/cgi/man.cgi?query=mqueuefs&sektion=5&manpath=FreeBSD+7.0-RELEASE
+    # ref: http://fxr.watson.org/fxr/source/kern/uipc_mqueue.c?v=FREEBSD91#L195
+    # ref: http://groups.google.com/group/comp.unix.solaris/browse_thread/thread/aa223fc7c91f8c38
+    # ref: http://cygwin.com/cgi-bin/cvsweb.cgi/src/winsup/cygwin/posix_ipc.cc?cvsroot=src
+    # ref: http://cygwin.com/cgi-bin/cvsweb.cgi/src/winsup/cygwin/include/mqueue.h?cvsroot=src
+    mq_max_messages = None
+
+    # Try to get the value from where Linux stores it.
+    try:
+        mq_max_messages = int(open("/proc/sys/fs/mqueue/msg_max").read())
+    except:
+        # Oh well.
+        pass
+
+    if not mq_max_messages:
+        # Maybe we're on BSD.
+        mq_max_messages = get_sysctl_value('kern.mqueue.maxmsg')
+        if mq_max_messages:
+            mq_max_messages = int(mq_max_messages)
+
+    if not mq_max_messages:
+        # We're on a non-Linux, non-BSD system, or OS X, or BSD with
+        # the mqueuefs kernel module not loaded (which it's not, by default,
+        # under FreeBSD 8.x and 9.x, which are the only systems I've tested).
+        #
+        # If we're on FreeBSD and mqueuefs isn't loaded when this code runs,
+        # sysctl won't be able to provide mq_max_messages to me. (I assume
+        # other BSDs behave the same.) If I use too large a default, then
+        # every attempt to create a message queue via posix_ipc will fail
+        # with "ValueError: Invalid parameter(s)" unless the user explicitly
+        # sets the max_messages param.
+        if platform.system().endswith("BSD"):
+            # 100 is the value I see under FreeBSD 9.2. I hope this works
+            # elsewhere!
+            mq_max_messages = 100
+        else:
+            # We're on a non-Linux, non-BSD system. I take a wild guess at an
+            # appropriate value. The max possible is > 2 billion, but the
+            # values used by Linux and FreeBSD suggest that a smaller default
+            # is wiser.
+ mq_max_messages = 1024 + + return mq_max_messages + + +def sniff_mq_max_message_size_default(): + # The max message size is not defined by POSIX. + + # On most systems I've tested, msg Qs are implemented via mmap-ed files + # or a similar interface, so the only theoretical limits are imposed by + # the file system. In practice, Linux and *BSD impose some tighter limits. + + # On Linux, max message size is available in a /proc file and often + # defaults to the value of 8192. + + # On FreeBSD (and other BSDs, I assume), it's available via sysctl as + # kern.mqueue.maxmsgsize. On my FreeBSD 9.1 test system, it defaults to + # 16384. + + # mqueue.h defines mq_attr.mq_msgsize as a C long, so that's + # a practical limit for this value. + + # Further complicating things is the fact that the module has to allocate + # a buffer the size of the queue's max message every time receive() is + # called, so it would be a bad idea to set this default to the max. + # I set it to 8192 -- not too small, not too big. I only set it smaller + # if I'm on a system that tells me I must do so. + DEFAULT = 8192 + mq_max_message_size_default = 0 + + # Try to get the value from where Linux stores it. + try: + mq_max_message_size_default = \ + int(open("/proc/sys/fs/mqueue/msgsize_max").read()) + except: + # oh well + pass + + if not mq_max_message_size_default: + # Maybe we're on BSD. + mq_max_message_size_default = get_sysctl_value('kern.mqueue.maxmsgsize') + if mq_max_message_size_default: + mq_max_message_size_default = int(mq_max_message_size_default) + + if not mq_max_message_size_default: + mq_max_message_size_default = DEFAULT + + return mq_max_message_size_default + + + +def probe(): + linker_options = "" + d = { } + + f = open("VERSION") + d["POSIX_IPC_VERSION"] = '"%s"' % f.read().strip() + f.close() + + # Sniffing of the realtime libs has to go early in the list so as + # to provide correct linker options to the rest of the tests. + if "Darwin" in platform.uname(): + # I skip the test under Darwin/OS X for two reasons. First, I know + # it isn't needed there. Second, I can't even compile the test for + # the realtime lib because it references mq_unlink() which OS X + # doesn't support. Unfortunately sniff_realtime_lib.c *must* + # reference mq_unlink() or some other mq_xxx() function because + # it is only the message queues that need the realtime libs under + # FreeBSD. + realtime_lib_is_needed = False + else: + # Some platforms (e.g. Linux & OpenSuse) require linking to librt + realtime_lib_is_needed = sniff_realtime_lib() + + if realtime_lib_is_needed: + d["REALTIME_LIB_IS_NEEDED"] = "" + linker_options = " -lrt " + + d["PAGE_SIZE"] = sniff_page_size() + + if sniff_sem_getvalue(linker_options): + d["SEM_GETVALUE_EXISTS"] = "" + + if ("SEM_GETVALUE_EXISTS" in d) and ("Darwin" in platform.uname()): + # sem_getvalue() isn't available on OS X. The function exists but + # always returns -1 (under OS X 10.9) or ENOSYS ("Function not + # implemented") under some earlier version(s). + del d["SEM_GETVALUE_EXISTS"] + + if sniff_sem_timedwait(linker_options): + d["SEM_TIMEDWAIT_EXISTS"] = "" + + d["SEM_VALUE_MAX"] = sniff_sem_value_max() + # A return of None means that I don't need to #define this myself. 
+    if d["SEM_VALUE_MAX"] is None:
+        del d["SEM_VALUE_MAX"]
+
+    if sniff_mq_existence(linker_options):
+        d["MESSAGE_QUEUE_SUPPORT_EXISTS"] = ""
+
+    d["QUEUE_MESSAGES_MAX_DEFAULT"] = sniff_mq_max_messages()
+    d["QUEUE_MESSAGE_SIZE_MAX_DEFAULT"] = sniff_mq_max_message_size_default()
+    d["QUEUE_PRIORITY_MAX"] = sniff_mq_prio_max()
+
+    if PY_MAJOR_VERSION == 2:
+        # I only need this for Python 2.x
+        d["PY_INT_MAX"] = sys.maxint
+
+
+    msg = """/*
+This header file was generated when you ran setup. Once created, the setup
+process won't overwrite it, so you can adjust the values by hand and
+recompile if you need to.
+
+On your platform, this file may contain only this comment -- that's OK!
+
+To recreate this file, just delete it and re-run setup.py.
+*/
+
+"""
+    filename = "probe_results.h"
+    if not os.path.exists(filename):
+        lines = ["#define %s\t\t%s" % (key, d[key]) for key in d if key != "PAGE_SIZE"]
+
+        # PAGE_SIZE gets some special treatment. It's defined in header files
+        # on some systems, in which case I might get a redefinition error in
+        # my header file, so I wrap it in #ifndef/#endif.
+
+        lines.append("#ifndef PAGE_SIZE")
+        lines.append("#define PAGE_SIZE\t\t%s" % d["PAGE_SIZE"])
+        lines.append("#endif")
+
+        # A trailing '\n' keeps compilers happy...
+        open(filename, "w").write(msg + '\n'.join(lines) + '\n')
+
+    return d
+
+
+if __name__ == "__main__":
+    print (probe())
+
+
+
diff --git a/prober/sniff_mq_existence.c b/prober/sniff_mq_existence.c
new file mode 100644
index 0000000..af98b15
--- /dev/null
+++ b/prober/sniff_mq_existence.c
@@ -0,0 +1,8 @@
+#include <mqueue.h>
+
+int main(void) {
+    mq_unlink("");
+
+    return 0;
+}
+
diff --git a/prober/sniff_mq_prio_max.c b/prober/sniff_mq_prio_max.c
new file mode 100644
index 0000000..63cf68d
--- /dev/null
+++ b/prober/sniff_mq_prio_max.c
@@ -0,0 +1,9 @@
+#include <stdio.h>
+#include <limits.h>
+
+int main(void) {
+    printf("%ld\n", (long)MQ_PRIO_MAX);
+
+    return 0;
+}
+
diff --git a/prober/sniff_page_size.c b/prober/sniff_page_size.c
new file mode 100644
index 0000000..6338168
--- /dev/null
+++ b/prober/sniff_page_size.c
@@ -0,0 +1,19 @@
+#include <stdio.h>
+
+// Code for determining page size swiped from Python's mmapmodule.c
+#if defined(HAVE_SYSCONF) && defined(_SC_PAGESIZE)
+static int
+my_getpagesize(void)
+{
+    return sysconf(_SC_PAGESIZE);
+}
+#else
+#include <unistd.h>
+#define my_getpagesize getpagesize
+#endif
+
+int main(void) {
+    printf("%d\n", my_getpagesize());
+
+    return 0;
+}
diff --git a/prober/sniff_realtime_lib.c b/prober/sniff_realtime_lib.c
new file mode 100644
index 0000000..c0a15a4
--- /dev/null
+++ b/prober/sniff_realtime_lib.c
@@ -0,0 +1,11 @@
+#include <mqueue.h>
+
+int main(void) {
+    /* Under FreeBSD and OpenSuse, linking to the realtime lib is required,
+       but only for the mq_xxx() functions, so checking for sem_xxx() or shm_xxx()
+       here would not be a sufficient test.
+    */
+    mq_unlink("");
+
+    return 0;
+}
diff --git a/prober/sniff_sem_getvalue.c b/prober/sniff_sem_getvalue.c
new file mode 100644
index 0000000..4632853
--- /dev/null
+++ b/prober/sniff_sem_getvalue.c
@@ -0,0 +1,8 @@
+#include <stdlib.h>
+#include <semaphore.h>
+
+int main(void) {
+    sem_getvalue(NULL, NULL);
+    return 0;
+}
+
diff --git a/prober/sniff_sem_timedwait.c b/prober/sniff_sem_timedwait.c
new file mode 100644
index 0000000..f1a564d
--- /dev/null
+++ b/prober/sniff_sem_timedwait.c
@@ -0,0 +1,8 @@
+#include <time.h>
+#include <semaphore.h>
+
+int main(void) {
+    sem_timedwait(NULL, NULL);
+    return 0;
+}
+
diff --git a/prober/sniff_sem_value_max.c b/prober/sniff_sem_value_max.c
new file mode 100644
index 0000000..37b4004
--- /dev/null
+++ b/prober/sniff_sem_value_max.c
@@ -0,0 +1,7 @@
+// The location of this constant varies.
+
+#include <limits.h>
+#include <semaphore.h>
+
+int main(void) { return SEM_VALUE_MAX; }
+
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..62d7ba1
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,69 @@
+# Python modules
+import distutils.core as duc
+import platform
+
+# My modules
+import prober
+
+VERSION = open("VERSION").read().strip()
+
+name = "posix_ipc"
+description = "POSIX IPC primitives (semaphores, shared memory and message queues) for Python"
+long_description = open("README").read().strip()
+author = "Philip Semanchuk"
+author_email = "philip@semanchuk.com"
+maintainer = "Philip Semanchuk"
+url = "http://semanchuk.com/philip/posix_ipc/"
+download_url = "http://semanchuk.com/philip/posix_ipc/posix_ipc-%s.tar.gz" % VERSION
+source_files = ["posix_ipc_module.c"]
+# http://pypi.python.org/pypi?%3Aaction=list_classifiers
+classifiers = [ "Development Status :: 5 - Production/Stable",
+                "Intended Audience :: Developers",
+                "License :: OSI Approved :: BSD License",
+                "Operating System :: MacOS :: MacOS X",
+                "Operating System :: POSIX :: BSD :: FreeBSD",
+                "Operating System :: POSIX :: Linux",
+                "Operating System :: POSIX :: SunOS/Solaris",
+                "Operating System :: POSIX",
+                "Operating System :: Unix",
+                "Programming Language :: Python",
+                "Programming Language :: Python :: 2",
+                "Programming Language :: Python :: 3",
+                "Topic :: Utilities" ]
+license = "http://creativecommons.org/licenses/BSD/"
+keywords = "ipc inter-process communication semaphore shared memory shm message queue"
+
+libraries = [ ]
+
+d = prober.probe()
+
+# Linux & FreeBSD require linking against the realtime libs.
+# Linking to librt causes an error on other platforms, so it's only added
+# when the probe says it's needed.
+if "REALTIME_LIB_IS_NEEDED" in d:
+    libraries.append("rt")
+
+ext_modules = [ duc.Extension("posix_ipc",
+                              source_files,
+                              libraries=libraries,
+                              depends = [ "posix_ipc_module.c",
+                                          "probe_results.h",
+                                        ],
+#                              extra_compile_args=['-E']
+                              )
+              ]
+
+
+duc.setup(name = name,
+          version = VERSION,
+          description = description,
+          long_description = long_description,
+          author = author,
+          author_email = author_email,
+          maintainer = maintainer,
+          url = url,
+          download_url = download_url,
+          classifiers = classifiers,
+          license = license,
+          keywords = keywords,
+          ext_modules = ext_modules
+          )
--
cgit v1.2.1
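
For orientation, the constants and exception types that posix_ipc_module.c registers above (O_CREX, MESSAGE_QUEUES_SUPPORTED, VERSION, ExistentialError, BusyError, and friends) surface in Python roughly as sketched below. The constant and exception names come from the module-init code in this patch; the constructor and method signatures used here (Semaphore(name, flags, initial_value=...), acquire()/release()/close()/unlink(), MessageQueue(name, flags), send()/receive()) are assumptions based on the package's documented API in ReadMe.html rather than on anything shown in this patch, so treat this as an illustrative sketch and not a definitive reference. It assumes the extension has already been built and installed.

    # Illustrative sketch only. Constant and exception names are taken from
    # the module-init code above; constructor and method signatures are
    # assumed from the package's documented API (ReadMe.html) and may differ.
    import posix_ipc

    print("posix_ipc version:", posix_ipc.VERSION)

    # O_CREX == O_CREAT | O_EXCL: creation fails with ExistentialError if
    # the name is already in use.
    try:
        sem = posix_ipc.Semaphore("/example_sem", posix_ipc.O_CREX,
                                  initial_value=1)
    except posix_ipc.ExistentialError:
        sem = posix_ipc.Semaphore("/example_sem")

    # A timed acquire() raises BusyError when the timeout expires (only
    # meaningful where SEMAPHORE_TIMEOUT_SUPPORTED is True).
    sem.acquire()
    try:
        pass  # critical section
    finally:
        sem.release()

    sem.close()
    sem.unlink()  # remove the name now that we're done with it

    # Message queue support is decided at build time by the probe above.
    if posix_ipc.MESSAGE_QUEUES_SUPPORTED:
        mq = posix_ipc.MessageQueue("/example_mq", posix_ipc.O_CREX)
        mq.send("hello")
        message, priority = mq.receive()
        mq.close()
        mq.unlink()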