Commit by Stefan Seefeld, 2016-10-07 20:03:12 -04:00
64 changed files with 5131 additions and 35 deletions

src/numpy/dtype.cpp (new file, 200 lines)

@@ -0,0 +1,200 @@
// Copyright Jim Bosch 2010-2012.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifdef _MSC_VER
#include <boost/cstdint.hpp>
#endif
#define BOOST_NUMPY_INTERNAL
#include <boost/numpy/internal.hpp>
#define DTYPE_FROM_CODE(code) \
dtype(python::detail::new_reference(reinterpret_cast<PyObject*>(PyArray_DescrFromType(code))))
#define BUILTIN_INT_DTYPE(bits) \
template <> struct builtin_int_dtype< bits, false > { \
static dtype get() { return DTYPE_FROM_CODE(NPY_INT ## bits); } \
}; \
template <> struct builtin_int_dtype< bits, true > { \
static dtype get() { return DTYPE_FROM_CODE(NPY_UINT ## bits); } \
}; \
template dtype get_int_dtype< bits, false >(); \
template dtype get_int_dtype< bits, true >()
#define BUILTIN_FLOAT_DTYPE(bits) \
template <> struct builtin_float_dtype< bits > { \
static dtype get() { return DTYPE_FROM_CODE(NPY_FLOAT ## bits); } \
}; \
template dtype get_float_dtype< bits >()
#define BUILTIN_COMPLEX_DTYPE(bits) \
template <> struct builtin_complex_dtype< bits > { \
static dtype get() { return DTYPE_FROM_CODE(NPY_COMPLEX ## bits); } \
}; \
template dtype get_complex_dtype< bits >()
namespace boost { namespace python { namespace converter {
NUMPY_OBJECT_MANAGER_TRAITS_IMPL(PyArrayDescr_Type, numpy::dtype)
}}} // namespace boost::python::converter
namespace boost { namespace numpy {
namespace detail {
dtype builtin_dtype<bool,true>::get() { return DTYPE_FROM_CODE(NPY_BOOL); }
template <int bits, bool isUnsigned> struct builtin_int_dtype;
template <int bits> struct builtin_float_dtype;
template <int bits> struct builtin_complex_dtype;
template <int bits, bool isUnsigned> dtype get_int_dtype() {
return builtin_int_dtype<bits,isUnsigned>::get();
}
template <int bits> dtype get_float_dtype() { return builtin_float_dtype<bits>::get(); }
template <int bits> dtype get_complex_dtype() { return builtin_complex_dtype<bits>::get(); }
BUILTIN_INT_DTYPE(8);
BUILTIN_INT_DTYPE(16);
BUILTIN_INT_DTYPE(32);
BUILTIN_INT_DTYPE(64);
BUILTIN_FLOAT_DTYPE(16);
BUILTIN_FLOAT_DTYPE(32);
BUILTIN_FLOAT_DTYPE(64);
BUILTIN_COMPLEX_DTYPE(64);
BUILTIN_COMPLEX_DTYPE(128);
#if NPY_BITSOF_LONGDOUBLE > NPY_BITSOF_DOUBLE
template <> struct builtin_float_dtype< NPY_BITSOF_LONGDOUBLE > {
static dtype get() { return DTYPE_FROM_CODE(NPY_LONGDOUBLE); }
};
template dtype get_float_dtype< NPY_BITSOF_LONGDOUBLE >();
template <> struct builtin_complex_dtype< 2 * NPY_BITSOF_LONGDOUBLE > {
static dtype get() { return DTYPE_FROM_CODE(NPY_CLONGDOUBLE); }
};
template dtype get_complex_dtype< 2 * NPY_BITSOF_LONGDOUBLE >();
#endif
} // namespace detail
python::detail::new_reference dtype::convert(python::object const & arg, bool align) {
PyArray_Descr* obj=NULL;
if (align) {
if (PyArray_DescrAlignConverter(arg.ptr(), &obj) < 0)
python::throw_error_already_set();
} else {
if (PyArray_DescrConverter(arg.ptr(), &obj) < 0)
python::throw_error_already_set();
}
return python::detail::new_reference(reinterpret_cast<PyObject*>(obj));
}
int dtype::get_itemsize() const { return reinterpret_cast<PyArray_Descr*>(ptr())->elsize; }
bool equivalent(dtype const & a, dtype const & b) {
// The NumPy documentation at
// http://docs.scipy.org/doc/numpy/reference/c-api.array.html states that
// PyArray_EquivTypes treats same-sized integer types as equivalent
// ("For example, on 32-bit platforms, NPY_LONG and NPY_INT are equivalent").
// That also holds on 64-bit Linux, but not on Windows x64, where long and int
// are both 32 bits yet are reported as non-equivalent. Work around it here:
#ifdef _MSC_VER
if (sizeof(long) == sizeof(int) &&
// Manually take care of the type equivalence.
(((a == dtype::get_builtin<long>() || a == dtype::get_builtin<int>()) &&
(b == dtype::get_builtin<long>() || b == dtype::get_builtin<int>())) ||
((a == dtype::get_builtin<unsigned int>() || a == dtype::get_builtin<unsigned long>()) &&
(b == dtype::get_builtin<unsigned int>() || b == dtype::get_builtin<unsigned long>())))) {
return true;
} else {
return PyArray_EquivTypes(
reinterpret_cast<PyArray_Descr*>(a.ptr()),
reinterpret_cast<PyArray_Descr*>(b.ptr())
);
}
#else
return PyArray_EquivTypes(
reinterpret_cast<PyArray_Descr*>(a.ptr()),
reinterpret_cast<PyArray_Descr*>(b.ptr())
);
#endif
}
namespace {
namespace pyconv = boost::python::converter;
template <typename T>
class array_scalar_converter {
public:
static PyTypeObject const * get_pytype() {
// This implementation depends on the fact that get_builtin returns pointers to objects
// NumPy has declared statically, and that the typeobj member also refers to a static
// object. That means we don't need to do any reference counting.
// In fact, I'm somewhat concerned that increasing the reference count of any of these
// might cause leaks, because I don't think Boost.Python ever decrements it, but it's
// probably a moot point if everything is actually static.
return reinterpret_cast<PyArray_Descr*>(dtype::get_builtin<T>().ptr())->typeobj;
}
static void * convertible(PyObject * obj) {
if (obj->ob_type == get_pytype()) {
return obj;
} else {
dtype dt(python::detail::borrowed_reference(obj->ob_type));
if (equivalent(dt, dtype::get_builtin<T>())) {
return obj;
}
}
return 0;
}
static void convert(PyObject * obj, pyconv::rvalue_from_python_stage1_data* data) {
void * storage = reinterpret_cast<pyconv::rvalue_from_python_storage<T>*>(data)->storage.bytes;
// We assume std::complex is a "standard layout" here and elsewhere; not guaranteed by
// C++03 standard, but true in every known implementation (and guaranteed by C++11).
PyArray_ScalarAsCtype(obj, reinterpret_cast<T*>(storage));
data->convertible = storage;
}
static void declare() {
pyconv::registry::push_back(
&convertible, &convert, python::type_id<T>()
#ifndef BOOST_PYTHON_NO_PY_SIGNATURES
, &get_pytype
#endif
);
}
};
} // anonymous
void dtype::register_scalar_converters() {
array_scalar_converter<bool>::declare();
array_scalar_converter<npy_uint8>::declare();
array_scalar_converter<npy_int8>::declare();
array_scalar_converter<npy_uint16>::declare();
array_scalar_converter<npy_int16>::declare();
array_scalar_converter<npy_uint32>::declare();
array_scalar_converter<npy_int32>::declare();
#ifdef _MSC_VER
// On MSVC the npy_(u)int32 types are defined as long types and are therefore
// treated as distinct from the plain int32 types, so these converters must be
// declared explicitly.
array_scalar_converter<boost::uint32_t>::declare();
array_scalar_converter<boost::int32_t>::declare();
#endif
array_scalar_converter<npy_uint64>::declare();
array_scalar_converter<npy_int64>::declare();
array_scalar_converter<float>::declare();
array_scalar_converter<double>::declare();
array_scalar_converter< std::complex<float> >::declare();
array_scalar_converter< std::complex<double> >::declare();
#if NPY_BITSOF_LONGDOUBLE > NPY_BITSOF_DOUBLE
array_scalar_converter<long double>::declare();
array_scalar_converter< std::complex<long double> >::declare();
#endif
}
} // namespace boost::numpy
} // namespace boost
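
A minimal usage sketch for the dtype facilities above (not part of the commit). It assumes an embedded interpreter and the umbrella header <boost/numpy.hpp> matching this commit's layout (later releases moved the headers under boost/python/numpy); get_builtin, get_itemsize, equivalent and initialize are the functions implemented or declared alongside this file.

#include <boost/python.hpp>
#include <boost/numpy.hpp>
#include <iostream>

namespace p = boost::python;
namespace np = boost::numpy;

int main()
{
    Py_Initialize();
    np::initialize();  // imports the NumPy C-API and registers the scalar converters above

    np::dtype d = np::dtype::get_builtin<double>();
    std::cout << "itemsize: " << d.get_itemsize() << "\n";  // 8 on common platforms

    // equivalent() compares dtypes by kind and size rather than by identity.
    std::cout << std::boolalpha
              << np::equivalent(d, np::dtype::get_builtin<double>()) << "\n";  // true
    return 0;
}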

src/numpy/matrix.cpp (new file, 68 lines)

@@ -0,0 +1,68 @@
// Copyright Jim Bosch 2010-2012.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#define BOOST_NUMPY_INTERNAL
#include <boost/numpy/internal.hpp>
#include <boost/numpy/matrix.hpp>
namespace boost
{
namespace numpy
{
namespace detail
{
inline python::object get_matrix_type()
{
python::object module = python::import("numpy");
return module.attr("matrix");
}
} // namespace boost::numpy::detail
} // namespace boost::numpy
namespace python
{
namespace converter
{
PyTypeObject const * object_manager_traits<numpy::matrix>::get_pytype()
{
return reinterpret_cast<PyTypeObject*>(numpy::detail::get_matrix_type().ptr());
}
} // namespace boost::python::converter
} // namespace boost::python
namespace numpy
{
python::object matrix::construct(python::object const & obj, dtype const & dt, bool copy)
{
return numpy::detail::get_matrix_type()(obj, dt, copy);
}
python::object matrix::construct(python::object const & obj, bool copy)
{
return numpy::detail::get_matrix_type()(obj, object(), copy);
}
matrix matrix::view(dtype const & dt) const
{
return matrix(python::detail::new_reference
(PyObject_CallMethod(this->ptr(), const_cast<char*>("view"), const_cast<char*>("O"), dt.ptr())));
}
matrix matrix::copy() const
{
return matrix(python::detail::new_reference
(PyObject_CallMethod(this->ptr(), const_cast<char*>("copy"), const_cast<char*>(""))));
}
matrix matrix::transpose() const
{
return matrix(python::extract<matrix>(ndarray::transpose()));
}
} // namespace boost::numpy
} // namespace boost
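
A hypothetical caller of the matrix wrapper above, not part of the commit. The (object, dtype) constructor mirroring matrix::construct is assumed from the accompanying matrix.hpp; everything here ultimately forwards to Python's numpy.matrix.

#include <boost/python.hpp>
#include <boost/numpy.hpp>

namespace p = boost::python;
namespace np = boost::numpy;

int main()
{
    Py_Initialize();
    np::initialize();

    // A 2x2 Python list of lists, wrapped as a numpy.matrix of float64.
    p::list row1; row1.append(1.0); row1.append(2.0);
    p::list row2; row2.append(3.0); row2.append(4.0);
    p::list rows; rows.append(row1); rows.append(row2);

    np::matrix m(rows, np::dtype::get_builtin<double>());
    np::matrix mt = m.transpose();  // implemented above via ndarray::transpose()
    np::matrix mc = m.copy();       // independent copy, via numpy.matrix.copy()
    return 0;
}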

src/numpy/ndarray.cpp (new file, 281 lines)

@@ -0,0 +1,281 @@
// Copyright Jim Bosch 2010-2012.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#define BOOST_NUMPY_INTERNAL
#include <boost/numpy/internal.hpp>
#include <boost/scoped_array.hpp>
namespace boost
{
namespace python
{
namespace converter
{
NUMPY_OBJECT_MANAGER_TRAITS_IMPL(PyArray_Type, numpy::ndarray)
} // namespace boost::python::converter
} // namespace boost::python
namespace numpy
{
namespace detail
{
ndarray::bitflag numpy_to_bitflag(int const f)
{
ndarray::bitflag r = ndarray::NONE;
if (f & NPY_C_CONTIGUOUS) r = (r | ndarray::C_CONTIGUOUS);
if (f & NPY_F_CONTIGUOUS) r = (r | ndarray::F_CONTIGUOUS);
if (f & NPY_ALIGNED) r = (r | ndarray::ALIGNED);
if (f & NPY_WRITEABLE) r = (r | ndarray::WRITEABLE);
return r;
}
int bitflag_to_numpy(ndarray::bitflag f)
{
int r = 0;
if (f & ndarray::C_CONTIGUOUS) r |= NPY_C_CONTIGUOUS;
if (f & ndarray::F_CONTIGUOUS) r |= NPY_F_CONTIGUOUS;
if (f & ndarray::ALIGNED) r |= NPY_ALIGNED;
if (f & ndarray::WRITEABLE) r |= NPY_WRITEABLE;
return r;
}
bool is_c_contiguous(std::vector<Py_intptr_t> const & shape,
std::vector<Py_intptr_t> const & strides,
int itemsize)
{
std::vector<Py_intptr_t>::const_reverse_iterator j = strides.rbegin();
int total = itemsize;
for (std::vector<Py_intptr_t>::const_reverse_iterator i = shape.rbegin(); i != shape.rend(); ++i, ++j)
{
if (total != *j) return false;
total *= (*i);
}
return true;
}
bool is_f_contiguous(std::vector<Py_intptr_t> const & shape,
std::vector<Py_intptr_t> const & strides,
int itemsize)
{
std::vector<Py_intptr_t>::const_iterator j = strides.begin();
int total = itemsize;
for (std::vector<Py_intptr_t>::const_iterator i = shape.begin(); i != shape.end(); ++i, ++j)
{
if (total != *j) return false;
total *= (*i);
}
return true;
}
bool is_aligned(std::vector<Py_intptr_t> const & strides,
int itemsize)
{
for (std::vector<Py_intptr_t>::const_iterator i = strides.begin(); i != strides.end(); ++i)
{
if (*i % itemsize) return false;
}
return true;
}
inline PyArray_Descr * incref_dtype(dtype const & dt)
{
Py_INCREF(dt.ptr());
return reinterpret_cast<PyArray_Descr*>(dt.ptr());
}
ndarray from_data_impl(void * data,
dtype const & dt,
python::object const & shape,
python::object const & strides,
python::object const & owner,
bool writeable)
{
std::vector<Py_intptr_t> shape_(len(shape));
std::vector<Py_intptr_t> strides_(len(strides));
if (shape_.size() != strides_.size())
{
PyErr_SetString(PyExc_ValueError, "Lengths of the shape and strides arrays do not match.");
python::throw_error_already_set();
}
for (std::size_t i = 0; i < shape_.size(); ++i)
{
shape_[i] = python::extract<Py_intptr_t>(shape[i]);
strides_[i] = python::extract<Py_intptr_t>(strides[i]);
}
return from_data_impl(data, dt, shape_, strides_, owner, writeable);
}
ndarray from_data_impl(void * data,
dtype const & dt,
std::vector<Py_intptr_t> const & shape,
std::vector<Py_intptr_t> const & strides,
python::object const & owner,
bool writeable)
{
if (shape.size() != strides.size())
{
PyErr_SetString(PyExc_ValueError, "Lengths of the shape and strides arrays do not match.");
python::throw_error_already_set();
}
int itemsize = dt.get_itemsize();
int flags = 0;
if (writeable) flags |= NPY_WRITEABLE;
if (is_c_contiguous(shape, strides, itemsize)) flags |= NPY_C_CONTIGUOUS;
if (is_f_contiguous(shape, strides, itemsize)) flags |= NPY_F_CONTIGUOUS;
if (is_aligned(strides, itemsize)) flags |= NPY_ALIGNED;
ndarray r(python::detail::new_reference
(PyArray_NewFromDescr(&PyArray_Type,
incref_dtype(dt),
shape.size(),
const_cast<Py_intptr_t*>(&shape.front()),
const_cast<Py_intptr_t*>(&strides.front()),
data,
flags,
NULL)));
r.set_base(owner);
return r;
}
} // namespace detail
ndarray ndarray::view(dtype const & dt) const
{
return ndarray(python::detail::new_reference
(PyObject_CallMethod(this->ptr(), const_cast<char*>("view"), const_cast<char*>("O"), dt.ptr())));
}
ndarray ndarray::astype(dtype const & dt) const
{
return ndarray(python::detail::new_reference
(PyObject_CallMethod(this->ptr(), const_cast<char*>("astype"), const_cast<char*>("O"), dt.ptr())));
}
ndarray ndarray::copy() const
{
return ndarray(python::detail::new_reference
(PyObject_CallMethod(this->ptr(), const_cast<char*>("copy"), const_cast<char*>(""))));
}
dtype ndarray::get_dtype() const
{
return dtype(python::detail::borrowed_reference(get_struct()->descr));
}
python::object ndarray::get_base() const
{
if (get_struct()->base == NULL) return object();
return python::object(python::detail::borrowed_reference(get_struct()->base));
}
void ndarray::set_base(object const & base)
{
Py_XDECREF(get_struct()->base);
if (base != object())
{
Py_INCREF(base.ptr());
get_struct()->base = base.ptr();
}
else
{
get_struct()->base = NULL;
}
}
ndarray::bitflag ndarray::get_flags() const
{
return numpy::detail::numpy_to_bitflag(get_struct()->flags);
}
ndarray ndarray::transpose() const
{
return ndarray(python::detail::new_reference
(PyArray_Transpose(reinterpret_cast<PyArrayObject*>(this->ptr()), NULL)));
}
ndarray ndarray::squeeze() const
{
return ndarray(python::detail::new_reference
(PyArray_Squeeze(reinterpret_cast<PyArrayObject*>(this->ptr()))));
}
ndarray ndarray::reshape(python::tuple const & shape) const
{
return ndarray(python::detail::new_reference
(PyArray_Reshape(reinterpret_cast<PyArrayObject*>(this->ptr()), shape.ptr())));
}
python::object ndarray::scalarize() const
{
Py_INCREF(ptr());
return python::object(python::detail::new_reference(PyArray_Return(reinterpret_cast<PyArrayObject*>(ptr()))));
}
ndarray zeros(python::tuple const & shape, dtype const & dt)
{
int nd = len(shape);
boost::scoped_array<Py_intptr_t> dims(new Py_intptr_t[nd]);
for (int n=0; n<nd; ++n) dims[n] = python::extract<Py_intptr_t>(shape[n]);
return ndarray(python::detail::new_reference
(PyArray_Zeros(nd, dims.get(), detail::incref_dtype(dt), 0)));
}
ndarray zeros(int nd, Py_intptr_t const * shape, dtype const & dt)
{
return ndarray(python::detail::new_reference
(PyArray_Zeros(nd, const_cast<Py_intptr_t*>(shape), detail::incref_dtype(dt), 0)));
}
ndarray empty(python::tuple const & shape, dtype const & dt)
{
int nd = len(shape);
boost::scoped_array<Py_intptr_t> dims(new Py_intptr_t[nd]);
for (int n=0; n<nd; ++n) dims[n] = python::extract<Py_intptr_t>(shape[n]);
return ndarray(python::detail::new_reference
(PyArray_Empty(nd, dims.get(), detail::incref_dtype(dt), 0)));
}
ndarray empty(int nd, Py_intptr_t const * shape, dtype const & dt)
{
return ndarray(python::detail::new_reference
(PyArray_Empty(nd, const_cast<Py_intptr_t*>(shape), detail::incref_dtype(dt), 0)));
}
ndarray array(python::object const & obj)
{
return ndarray(python::detail::new_reference
(PyArray_FromAny(obj.ptr(), NULL, 0, 0, NPY_ENSUREARRAY, NULL)));
}
ndarray array(python::object const & obj, dtype const & dt)
{
return ndarray(python::detail::new_reference
(PyArray_FromAny(obj.ptr(), detail::incref_dtype(dt), 0, 0, NPY_ENSUREARRAY, NULL)));
}
ndarray from_object(python::object const & obj, dtype const & dt, int nd_min, int nd_max, ndarray::bitflag flags)
{
int requirements = detail::bitflag_to_numpy(flags);
return ndarray(python::detail::new_reference
(PyArray_FromAny(obj.ptr(),
detail::incref_dtype(dt),
nd_min, nd_max,
requirements,
NULL)));
}
ndarray from_object(python::object const & obj, int nd_min, int nd_max, ndarray::bitflag flags)
{
int requirements = detail::bitflag_to_numpy(flags);
return ndarray(python::detail::new_reference
(PyArray_FromAny(obj.ptr(),
NULL,
nd_min, nd_max,
requirements,
NULL)));
}
}
}
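
A sketch (not part of the commit) of the two most common entry points implemented above: zeros() for a fresh array and from_data() for wrapping an existing buffer without a copy. The from_data overload taking Python sequences for shape and strides is assumed from the accompanying ndarray.hpp; it forwards to detail::from_data_impl above.

#include <boost/python.hpp>
#include <boost/numpy.hpp>

namespace p = boost::python;
namespace np = boost::numpy;

int main()
{
    Py_Initialize();
    np::initialize();

    // A fresh 3x4 float64 array filled with zeros.
    np::ndarray z = np::zeros(p::make_tuple(3, 4), np::dtype::get_builtin<double>());

    // Wrap an existing buffer without copying. The owner argument keeps the data
    // alive; a static buffer outlives every array created here, so None (p::object()) suffices.
    static double buf[6] = {0, 1, 2, 3, 4, 5};
    np::ndarray a = np::from_data(buf,
                                  np::dtype::get_builtin<double>(),
                                  p::make_tuple(2, 3),               // shape
                                  p::make_tuple(3 * sizeof(double),  // strides, in bytes
                                                sizeof(double)),
                                  p::object());
    np::ndarray t = a.transpose();  // no copy; flags are recomputed by NumPy
    return 0;
}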

src/numpy/numpy.cpp (new file, 34 lines)

@@ -0,0 +1,34 @@
// Copyright Jim Bosch 2010-2012.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#define BOOST_NUMPY_INTERNAL_MAIN
#include <boost/numpy/internal.hpp>
#include <boost/numpy/dtype.hpp>
namespace boost
{
namespace numpy
{
#if PY_MAJOR_VERSION == 2
static void wrap_import_array() {
import_array();
}
#else
static void * wrap_import_array() {
import_array();
return NULL;  // import_array() returns from this function on failure; the success path needs an explicit return
}
#endif
void initialize(bool register_scalar_converters)
{
wrap_import_array();
import_ufunc();
if (register_scalar_converters)
dtype::register_scalar_converters();
}
}
}
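
initialize() has to run before any other call into the library, so in an extension module the natural place is the top of the module init function. A minimal sketch under that assumption (the module name "example" and the helper are illustrative only):

#include <boost/python.hpp>
#include <boost/numpy.hpp>

namespace p = boost::python;
namespace np = boost::numpy;

np::ndarray make_zeros(int n)
{
    return np::zeros(p::make_tuple(n), np::dtype::get_builtin<double>());
}

BOOST_PYTHON_MODULE(example)  // module name is arbitrary here
{
    np::initialize();         // imports the NumPy C-API and registers the scalar converters
    p::def("make_zeros", make_zeros);
}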

src/numpy/scalars.cpp (new file, 40 lines)

@@ -0,0 +1,40 @@
// Copyright Jim Bosch 2010-2012.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#define BOOST_NUMPY_INTERNAL
#include <boost/numpy/internal.hpp>
namespace boost
{
namespace python
{
namespace converter
{
NUMPY_OBJECT_MANAGER_TRAITS_IMPL(PyVoidArrType_Type, numpy::void_)
} // namespace boost::python::converter
} // namespace boost::python
namespace numpy
{
void_::void_(Py_ssize_t size)
: object(python::detail::new_reference
(PyObject_CallFunction((PyObject*)&PyVoidArrType_Type, const_cast<char*>("i"), size)))
{}
void_ void_::view(dtype const & dt) const
{
return void_(python::detail::new_reference
(PyObject_CallMethod(this->ptr(), const_cast<char*>("view"), const_cast<char*>("O"), dt.ptr())));
}
void_ void_::copy() const
{
return void_(python::detail::new_reference
(PyObject_CallMethod(this->ptr(), const_cast<char*>("copy"), const_cast<char*>(""))));
}
}
}
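
A small usage sketch (not part of the commit) for void_, which wraps numpy.void, the scalar type backing structured dtypes. The constructor above simply calls numpy.void(size), yielding size zero-initialized bytes; view(dtype) reinterprets that storage, with NumPy deciding whether a given reinterpretation is accepted.

#include <boost/python.hpp>
#include <boost/numpy.hpp>

namespace p = boost::python;
namespace np = boost::numpy;

int main()
{
    Py_Initialize();
    np::initialize();

    np::void_ v(16);         // 16 zero-initialized bytes, i.e. numpy.void(16)
    np::void_ c = v.copy();  // independent copy of the scalar's storage
    return 0;
}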

src/numpy/ufunc.cpp (new file, 69 lines)

@@ -0,0 +1,69 @@
// Copyright Jim Bosch 2010-2012.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#define BOOST_NUMPY_INTERNAL
#include <boost/numpy/internal.hpp>
#include <boost/numpy/ufunc.hpp>
namespace boost
{
namespace python
{
namespace converter
{
NUMPY_OBJECT_MANAGER_TRAITS_IMPL(PyArrayMultiIter_Type, numpy::multi_iter)
} // namespace boost::python::converter
} // namespace boost::python
namespace numpy
{
multi_iter make_multi_iter(python::object const & a1)
{
return multi_iter(python::detail::new_reference(PyArray_MultiIterNew(1, a1.ptr())));
}
multi_iter make_multi_iter(python::object const & a1, python::object const & a2)
{
return multi_iter(python::detail::new_reference(PyArray_MultiIterNew(2, a1.ptr(), a2.ptr())));
}
multi_iter make_multi_iter(python::object const & a1, python::object const & a2, python::object const & a3)
{
return multi_iter(python::detail::new_reference(PyArray_MultiIterNew(3, a1.ptr(), a2.ptr(), a3.ptr())));
}
void multi_iter::next()
{
PyArray_MultiIter_NEXT(ptr());
}
bool multi_iter::not_done() const
{
return PyArray_MultiIter_NOTDONE(ptr());
}
char * multi_iter::get_data(int i) const
{
return reinterpret_cast<char*>(PyArray_MultiIter_DATA(ptr(), i));
}
int multi_iter::get_nd() const
{
return reinterpret_cast<PyArrayMultiIterObject*>(ptr())->nd;
}
Py_intptr_t const * multi_iter::get_shape() const
{
return reinterpret_cast<PyArrayMultiIterObject*>(ptr())->dimensions;
}
Py_intptr_t multi_iter::shape(int n) const
{
return reinterpret_cast<PyArrayMultiIterObject*>(ptr())->dimensions[n];
}
}
}
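
multi_iter wraps PyArrayMultiIterObject, NumPy's broadcasting iterator. A sketch (not part of the commit) of walking two arrays in lockstep and accumulating a dot product; get_data() returns raw bytes that the caller must cast to the element type, and the zero-filled inputs make the expected result 0.0.

#include <boost/python.hpp>
#include <boost/numpy.hpp>
#include <iostream>

namespace p = boost::python;
namespace np = boost::numpy;

int main()
{
    Py_Initialize();
    np::initialize();

    np::ndarray a = np::zeros(p::make_tuple(3), np::dtype::get_builtin<double>());
    np::ndarray b = np::zeros(p::make_tuple(3), np::dtype::get_builtin<double>());

    np::multi_iter it = np::make_multi_iter(a, b);
    double sum = 0.0;
    while (it.not_done())
    {
        // get_data(i) points at the current element of the i-th operand.
        sum += *reinterpret_cast<double*>(it.get_data(0))
             * *reinterpret_cast<double*>(it.get_data(1));
        it.next();
    }
    std::cout << "dot product: " << sum << "\n";  // 0.0 for the zero-filled inputs
    return 0;
}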