From 0f0cf4b8b8ec1506b2f7e6858dfe4388e1850571 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ullrich=20K=C3=B6the?=
Date: Wed, 22 Nov 2000 12:25:32 +0000
Subject: [PATCH] added comments for new functionality

[SVN r8296]
---
 newtypes.h  | 43 +++++++++++++++++++++++++++++++------------
 operators.h | 35 ++++++++++++++++++++++++++++++++++-
 2 files changed, 65 insertions(+), 13 deletions(-)

diff --git a/newtypes.h b/newtypes.h
index b4df4176..423e4533 100644
--- a/newtypes.h
+++ b/newtypes.h
@@ -298,6 +298,11 @@ PyObject* reprable::instance_repr(PyObject* obj) const
     return downcast(obj)->repr();
 }
 
+// Helper class for optimized allocation of PODs: If two PODs
+// happen to contain identical byte patterns, they may share their
+// memory. Reference counting is used to free unused memory.
+// This is useful because the method tables of related extension classes
+// tend to be identical, so less memory is needed for them.
 class shared_pod_manager
 {
     typedef std::pair holder;
@@ -307,30 +312,44 @@ PyObject* reprable::instance_repr(PyObject* obj) const
     static shared_pod_manager& obj();
     ~shared_pod_manager();
 
-    template <class T>
-    static void replace_if_equal(T*& t)
-    {
-        t = reinterpret_cast<T*>(obj().replace_if_equal(t, sizeof(T)));
-    }
-
-    template <class T>
-    static void make_unique_copy(T*& t)
-    {
-        t = reinterpret_cast<T*>(obj().make_unique_copy(t, sizeof(T)));
-    }
-
+    // Allocate memory for POD T and fill it with zeros.
+    // This memory is initially not shared.
     template <class T>
     static void create(T*& t)
     {
         t = reinterpret_cast<T*>(obj().create(sizeof(T)));
     }
 
+    // Decrement the refcount for the memory t points to. If the count
+    // goes to zero, the memory is freed.
     template <class T>
     static void dispose(T* t)
    {
         obj().dec_ref(t, sizeof(T));
     }
 
+    // Attempt to share the memory t points to. If memory with the same
+    // contents already exists, t is replaced by a pointer to that memory,
+    // and t's old memory is disposed. Otherwise, t is registered for
+    // potential future sharing.
+    template <class T>
+    static void replace_if_equal(T*& t)
+    {
+        t = reinterpret_cast<T*>(obj().replace_if_equal(t, sizeof(T)));
+    }
+
+    // Create a copy of t's memory that is guaranteed to be private to t.
+    // Afterwards, t points to the new memory, unless it was already private,
+    // in which case there is no change (except that t's memory will no longer
+    // be considered for future sharing - see replace_if_equal()).
+    // This function *must* be called before the contents of (*t) are
+    // overwritten. Otherwise, inconsistencies and crashes may result.
+    template <class T>
+    static void make_unique_copy(T*& t)
+    {
+        t = reinterpret_cast<T*>(obj().make_unique_copy(t, sizeof(T)));
+    }
+
 private:
     void* replace_if_equal(void* pod, std::size_t size);
     void* make_unique_copy(void* pod, std::size_t size);
diff --git a/operators.h b/operators.h
index d9ff7878..95528bc8 100644
--- a/operators.h
+++ b/operators.h
@@ -11,9 +11,15 @@ namespace python {
 
 namespace detail {
+
+  // helper class for automatic operand type detection
+  // during operator wrapping.
   struct auto_operand {};
 }
 
+// Define operator ids that can be or'ed together
+// (python::op_add | python::op_sub | python::op_mul).
+// This makes it possible to wrap several operators in one line.
 enum operator_id
 {
     op_add = 0x1,
@@ -39,12 +45,20 @@ enum operator_id
     op_cmp = 0x100000
 };
 
+// Wrap the operators given by "which". Usage:
+// foo_class.def(python::operators<(python::op_add | python::op_sub)>());
 template <long which>
 struct operators {};
 
+// Wrap heterogeneous operators with the given left operand type. Usage:
+// foo_class.def(python::operators<(python::op_add | python::op_sub)>(),
+//               python::left_operand());
 template <class T>
 struct left_operand {};
 
+// Wrap heterogeneous operators with the given right operand type. Usage:
+// foo_class.def(python::operators<(python::op_add | python::op_sub)>(),
+//               python::right_operand());
 template <class T>
 struct right_operand {};
 
@@ -183,7 +197,14 @@ namespace detail
         };
     };
-
+
+
+// Fully specialize define_operator for all operators defined in operator_id above.
+// Every specialization defines one function object for normal operator calls and
+// one for operator calls with the operands reversed (the "__r*__" variants).
+// Specializations for most operators follow a standard pattern: execute the
+// expression that uses the operator in question. This standard pattern is realized
+// by the following macros, so that the actual specialization is a single macro call.
 #define PY_DEFINE_BINARY_OPERATORS(id, oper) \
     template <> \
     struct define_operator<op_##id> \
     {
@@ -269,6 +290,12 @@ namespace detail
 #undef PY_DEFINE_BINARY_OPERATORS
 #undef PY_DEFINE_UNARY_OPERATORS
 
+// Some operators need special treatment, e.g. because there is no corresponding
+// expression in C++. These are specialized manually.
+
+// pow(): Manual specialization is needed because an error message is required if
+// this function is called with three arguments. The "power modulo" operator is not
+// supported by define_operator, but it can be wrapped manually (see special.html).
 template <>
 struct define_operator<op_pow>
 {
@@ -322,6 +349,8 @@
     static const char * rname() { return "__rpow__"; }
 };
 
+// divmod(): Manual specialization is needed because we must actually call two
+// operators and return a tuple containing both results.
 template <>
 struct define_operator<op_divmod>
 {
@@ -379,6 +408,8 @@
     static const char * rname() { return "__rdivmod__"; }
 };
 
+// cmp(): Manual specialization is needed because C++ has no three-way comparison.
+// It is implemented by two one-way comparisons, with the operands reversed in the second.
 template <>
 struct define_operator<op_cmp>
 {
@@ -430,6 +461,8 @@
     static const char * rname() { return "__rcmp__"; }
 };
 
+// str(): Manual specialization is needed because the string conversion does not
+// follow the standard pattern realized by the macros.
 template <>
 struct define_operator<op_str>
 {
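
Usage sketch for the shared_pod_manager interface documented above (an editor's
illustration, not part of the patch). The call sequence follows the new comments:
create() yields zeroed, unshared memory; replace_if_equal() opts into sharing;
make_unique_copy() must detach the memory before any write; dispose() drops the
reference. The POD type method_table is hypothetical, and shared_pod_manager is
used unqualified because the patch context does not show its enclosing namespace.

    #include "newtypes.h"

    // Hypothetical POD payload; in practice this would be the method table of
    // an extension class, which is where sharing pays off.
    struct method_table
    {
        void* slots[16];
    };

    void build_table()
    {
        method_table* t;
        shared_pod_manager::create(t);           // zero-filled, not yet shared
        // ... fill in t->slots ...
        shared_pod_manager::replace_if_equal(t); // share with an identical table,
                                                 // or register t for later sharing

        shared_pod_manager::make_unique_copy(t); // detach *before* overwriting *t
        // ... modify t->slots safely ...
        shared_pod_manager::replace_if_equal(t); // offer the new contents for sharing

        shared_pod_manager::dispose(t);          // decrement refcount, free if zero
    }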
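
Usage sketch for the operator wrapping documented above (again an editor's
illustration, not part of the patch). It expands the foo_class.def() lines from the
new comments into a complete wrapping function. The class rational and its operators
are made up, the module_builder/class_builder names follow Boost.Python v1 style but
are assumptions here (the header declaring them is omitted), and the int argument to
left_operand is a guess, since the template arguments in the original usage comments
were lost.

    #include "operators.h"

    struct rational
    {
        int num, den;
    };

    rational operator+(rational const& a, rational const& b);
    rational operator-(rational const& a, rational const& b);
    rational operator*(int a, rational const& b);  // heterogeneous: int on the left

    void wrap_rational(python::module_builder& m)
    {
        python::class_builder<rational> rational_class(m, "rational");

        // One def() wraps __add__ and __sub__ (plus the reversed-operand
        // __radd__ and __rsub__ variants):
        rational_class.def(python::operators<(python::op_add | python::op_sub)>());

        // Heterogeneous multiplication with an int left operand, i.e. __rmul__
        // on the Python side:
        rational_class.def(python::operators<python::op_mul>(),
                           python::left_operand<int>());
    }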