/*
    pybind11/numpy.h: Basic NumPy support, vectorize() wrapper

    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>

    All rights reserved. Use of this source code is governed by a
    BSD-style license that can be found in the LICENSE file.
*/

#pragma once

#include "pybind11.h"
#include "complex.h"
#include <numeric>
#include <algorithm>
#include <array>
#include <cstdlib>
#include <cstring>
#include <sstream>
#include <string>
#include <functional>
#include <utility>
#include <vector>
#include <typeindex>

#if defined(_MSC_VER)
#  pragma warning(push)
#  pragma warning(disable: 4127) // warning C4127: Conditional expression is constant
#endif
/* This will be true on all flat address space platforms and allows us to reduce the
   whole npy_intp / ssize_t / Py_intptr_t business down to just ssize_t for all size
   and dimension types (e.g. shape, strides, indexing), instead of inflicting this
   upon the library user. */
static_assert(sizeof(ssize_t) == sizeof(Py_intptr_t), "ssize_t != Py_intptr_t");

NAMESPACE_BEGIN(PYBIND11_NAMESPACE)

class array; // Forward declaration

NAMESPACE_BEGIN(detail)
template <typename type, typename SFINAE = void> struct npy_format_descriptor;

struct PyArrayDescr_Proxy {
    PyObject_HEAD
    PyObject *typeobj;
    char kind;
    char type;
    char byteorder;
    char flags;
    int type_num;
    int elsize;
    int alignment;
    char *subarray;
    PyObject *fields;
    PyObject *names;
};

struct PyArray_Proxy {
    PyObject_HEAD
    char *data;
    int nd;
    ssize_t *dimensions;
    ssize_t *strides;
    PyObject *base;
    PyObject *descr;
    int flags;
};

struct PyVoidScalarObject_Proxy {
    PyObject_VAR_HEAD
    char *obval;
    PyArrayDescr_Proxy *descr;
    int flags;
    PyObject *base;
};

struct numpy_type_info {
    PyObject* dtype_ptr;
    std::string format_str;
};

struct numpy_internals {
    std::unordered_map<std::type_index, numpy_type_info> registered_dtypes;

    numpy_type_info *get_type_info(const std::type_info& tinfo, bool throw_if_missing = true) {
        auto it = registered_dtypes.find(std::type_index(tinfo));
        if (it != registered_dtypes.end())
            return &(it->second);
        if (throw_if_missing)
            pybind11_fail(std::string("NumPy type info missing for ") + tinfo.name());
        return nullptr;
    }

    template<typename T> numpy_type_info *get_type_info(bool throw_if_missing = true) {
        return get_type_info(typeid(typename std::remove_cv<T>::type), throw_if_missing);
    }
};

inline PYBIND11_NOINLINE void load_numpy_internals(numpy_internals* &ptr) {
    ptr = &get_or_create_shared_data<numpy_internals>("_numpy_internals");
}

inline numpy_internals& get_numpy_internals() {
    static numpy_internals* ptr = nullptr;
    if (!ptr)
        load_numpy_internals(ptr);
    return *ptr;
}
struct npy_api {
    enum constants {
        NPY_ARRAY_C_CONTIGUOUS_ = 0x0001,
        NPY_ARRAY_F_CONTIGUOUS_ = 0x0002,
        NPY_ARRAY_OWNDATA_ = 0x0004,
        NPY_ARRAY_FORCECAST_ = 0x0010,
        NPY_ARRAY_ENSUREARRAY_ = 0x0040,
        NPY_ARRAY_ALIGNED_ = 0x0100,
        NPY_ARRAY_WRITEABLE_ = 0x0400,
        NPY_BOOL_ = 0,
        NPY_BYTE_, NPY_UBYTE_,
        NPY_SHORT_, NPY_USHORT_,
        NPY_INT_, NPY_UINT_,
        NPY_LONG_, NPY_ULONG_,
        NPY_LONGLONG_, NPY_ULONGLONG_,
        NPY_FLOAT_, NPY_DOUBLE_, NPY_LONGDOUBLE_,
        NPY_CFLOAT_, NPY_CDOUBLE_, NPY_CLONGDOUBLE_,
        NPY_OBJECT_ = 17,
        NPY_STRING_, NPY_UNICODE_, NPY_VOID_
    };

    typedef struct {
        Py_intptr_t *ptr;
        int len;
    } PyArray_Dims;

    static npy_api& get() {
        static npy_api api = lookup();
        return api;
    }

    bool PyArray_Check_(PyObject *obj) const {
        return (bool) PyObject_TypeCheck(obj, PyArray_Type_);
    }
    bool PyArrayDescr_Check_(PyObject *obj) const {
        return (bool) PyObject_TypeCheck(obj, PyArrayDescr_Type_);
    }

    unsigned int (*PyArray_GetNDArrayCFeatureVersion_)();
    PyObject *(*PyArray_DescrFromType_)(int);
    PyObject *(*PyArray_NewFromDescr_)
        (PyTypeObject *, PyObject *, int, Py_intptr_t *,
         Py_intptr_t *, void *, int, PyObject *);
    PyObject *(*PyArray_DescrNewFromType_)(int);
    int (*PyArray_CopyInto_)(PyObject *, PyObject *);
    PyObject *(*PyArray_NewCopy_)(PyObject *, int);
    PyTypeObject *PyArray_Type_;
    PyTypeObject *PyVoidArrType_Type_;
    PyTypeObject *PyArrayDescr_Type_;
    PyObject *(*PyArray_DescrFromScalar_)(PyObject *);
    PyObject *(*PyArray_FromAny_) (PyObject *, PyObject *, int, int, int, PyObject *);
    int (*PyArray_DescrConverter_) (PyObject *, PyObject **);
    bool (*PyArray_EquivTypes_) (PyObject *, PyObject *);
    int (*PyArray_GetArrayParamsFromObject_)(PyObject *, PyObject *, char, PyObject **, int *,
                                             Py_ssize_t *, PyObject **, PyObject *);
    PyObject *(*PyArray_Squeeze_)(PyObject *);
    int (*PyArray_SetBaseObject_)(PyObject *, PyObject *);
    PyObject* (*PyArray_Resize_)(PyObject*, PyArray_Dims*, int, int);
private:
    enum functions {
        API_PyArray_GetNDArrayCFeatureVersion = 211,
        API_PyArray_Type = 2,
        API_PyArrayDescr_Type = 3,
        API_PyVoidArrType_Type = 39,
        API_PyArray_DescrFromType = 45,
        API_PyArray_DescrFromScalar = 57,
        API_PyArray_FromAny = 69,
        API_PyArray_Resize = 80,
        API_PyArray_CopyInto = 82,
        API_PyArray_NewCopy = 85,
        API_PyArray_NewFromDescr = 94,
        API_PyArray_DescrNewFromType = 9,
        API_PyArray_DescrConverter = 174,
        API_PyArray_EquivTypes = 182,
        API_PyArray_GetArrayParamsFromObject = 278,
        API_PyArray_Squeeze = 136,
        API_PyArray_SetBaseObject = 282
    };

    static npy_api lookup() {
        module m = module::import("numpy.core.multiarray");
        auto c = m.attr("_ARRAY_API");
#if PY_MAJOR_VERSION >= 3
        void **api_ptr = (void **) PyCapsule_GetPointer(c.ptr(), NULL);
#else
        void **api_ptr = (void **) PyCObject_AsVoidPtr(c.ptr());
#endif
        npy_api api;
#define DECL_NPY_API(Func) api.Func##_ = (decltype(api.Func##_)) api_ptr[API_##Func];
        DECL_NPY_API(PyArray_GetNDArrayCFeatureVersion);
        if (api.PyArray_GetNDArrayCFeatureVersion_() < 0x7)
            pybind11_fail("pybind11 numpy support requires numpy >= 1.7.0");
        DECL_NPY_API(PyArray_Type);
        DECL_NPY_API(PyVoidArrType_Type);
        DECL_NPY_API(PyArrayDescr_Type);
        DECL_NPY_API(PyArray_DescrFromType);
        DECL_NPY_API(PyArray_DescrFromScalar);
        DECL_NPY_API(PyArray_FromAny);
        DECL_NPY_API(PyArray_Resize);
        DECL_NPY_API(PyArray_CopyInto);
        DECL_NPY_API(PyArray_NewCopy);
        DECL_NPY_API(PyArray_NewFromDescr);
        DECL_NPY_API(PyArray_DescrNewFromType);
        DECL_NPY_API(PyArray_DescrConverter);
        DECL_NPY_API(PyArray_EquivTypes);
        DECL_NPY_API(PyArray_GetArrayParamsFromObject);
        DECL_NPY_API(PyArray_Squeeze);
        DECL_NPY_API(PyArray_SetBaseObject);
#undef DECL_NPY_API
        return api;
    }
};
inline PyArray_Proxy* array_proxy(void* ptr) {
    return reinterpret_cast<PyArray_Proxy*>(ptr);
}

inline const PyArray_Proxy* array_proxy(const void* ptr) {
    return reinterpret_cast<const PyArray_Proxy*>(ptr);
}

inline PyArrayDescr_Proxy* array_descriptor_proxy(PyObject* ptr) {
    return reinterpret_cast<PyArrayDescr_Proxy*>(ptr);
}

inline const PyArrayDescr_Proxy* array_descriptor_proxy(const PyObject* ptr) {
    return reinterpret_cast<const PyArrayDescr_Proxy*>(ptr);
}

inline bool check_flags(const void* ptr, int flag) {
    return (flag == (array_proxy(ptr)->flags & flag));
}

template <typename T> struct is_std_array : std::false_type { };
template <typename T, size_t N> struct is_std_array<std::array<T, N>> : std::true_type { };
template <typename T> struct is_complex : std::false_type { };
template <typename T> struct is_complex<std::complex<T>> : std::true_type { };

template <typename T> struct array_info_scalar {
    typedef T type;
    static constexpr bool is_array = false;
    static constexpr bool is_empty = false;
    static constexpr auto extents = _("");
    static void append_extents(list& /* shape */) { }
};
// Computes underlying type and a comma-separated list of extents for array
// types (any mix of std::array and built-in arrays). An array of char is
// treated as scalar because it gets special handling.
template <typename T> struct array_info : array_info_scalar<T> { };
template <typename T, size_t N> struct array_info<std::array<T, N>> {
    using type = typename array_info<T>::type;
    static constexpr bool is_array = true;
    static constexpr bool is_empty = (N == 0) || array_info<T>::is_empty;
    static constexpr size_t extent = N;

    // appends the extents to shape
    static void append_extents(list& shape) {
        shape.append(N);
        array_info<T>::append_extents(shape);
    }

    static constexpr auto extents = _<array_info<T>::is_array>(
        concat(_<N>(), array_info<T>::extents), _<N>()
    );
};
// For numpy we have special handling for arrays of characters, so we don't include
// the size in the array extents.
template <size_t N> struct array_info<char[N]> : array_info_scalar<char[N]> { };
template <size_t N> struct array_info<std::array<char, N>> : array_info_scalar<std::array<char, N>> { };
template <typename T, size_t N> struct array_info<T[N]> : array_info<std::array<T, N>> { };
template <typename T> using remove_all_extents_t = typename array_info<T>::type;
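
// Illustrative sketch (not part of the original header): for a nested array type the traits
// above flatten to the scalar element type and collect the extents, e.g.
//
//     using T = int[2][3];
//     static_assert(std::is_same<remove_all_extents_t<T>, int>::value, "underlying type is int");
//     // array_info<T>::extents contributes "2, 3" to generated descriptor strings, and
//     // array_info<T>::append_extents(shape) appends 2 and 3 to a py::list.
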
template <typename T> using is_pod_struct = all_of<
    std::is_standard_layout<T>,     // since we're accessing directly in memory we need a standard layout type
#if !defined(__GNUG__) || defined(_LIBCPP_VERSION) || defined(_GLIBCXX_USE_CXX11_ABI)
    // _GLIBCXX_USE_CXX11_ABI indicates that we're using libstdc++ from GCC 5 or newer, independent
    // of the actual compiler (Clang can also use libstdc++, but it always defines __GNUC__ == 4).
    std::is_trivially_copyable<T>,
#else
    // GCC 4 doesn't implement is_trivially_copyable, so approximate it
    std::is_trivially_destructible<T>,
    satisfies_any_of<T, std::has_trivial_copy_constructor, std::has_trivial_copy_assign>,
#endif
    satisfies_none_of<T, std::is_reference, std::is_array, is_std_array, std::is_arithmetic, is_complex, std::is_enum>
>;

template <ssize_t Dim = 0, typename Strides> ssize_t byte_offset_unsafe(const Strides &) { return 0; }
template <ssize_t Dim = 0, typename Strides, typename... Ix>
ssize_t byte_offset_unsafe(const Strides &strides, ssize_t i, Ix... index) {
    return i * strides[Dim] + byte_offset_unsafe<Dim + 1>(strides, index...);
}
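
// Worked example (illustrative, not part of the original header): for a C-contiguous double
// array of shape (3, 4) the strides are {32, 8} bytes, so
// byte_offset_unsafe(strides, 2, 1) = 2*32 + 1*8 = 72, i.e. element [2][1] lives 72 bytes
// past the start of the buffer.
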
/**
 * Proxy class providing unsafe, unchecked const access to array data. This is constructed through
 * the `unchecked<T, N>()` method of `array` or the `unchecked<N>()` method of `array_t<T>`. `Dims`
 * will be -1 for dimensions determined at runtime.
 */
template <typename T, ssize_t Dims>
class unchecked_reference {
protected:
    static constexpr bool Dynamic = Dims < 0;
    const unsigned char *data_;
    // Storing the shape & strides in local variables (i.e. these arrays) allows the compiler to
    // make large performance gains on big, nested loops, but requires compile-time dimensions
    conditional_t<Dynamic, const ssize_t *, std::array<ssize_t, (size_t) Dims>>
            shape_, strides_;
    const ssize_t dims_;

    friend class pybind11::array;
    // Constructor for compile-time dimensions:
    template <bool Dyn = Dynamic>
    unchecked_reference(const void *data, const ssize_t *shape, const ssize_t *strides, enable_if_t<!Dyn, ssize_t>)
    : data_{reinterpret_cast<const unsigned char *>(data)}, dims_{Dims} {
        for (size_t i = 0; i < (size_t) dims_; i++) {
            shape_[i] = shape[i];
            strides_[i] = strides[i];
        }
    }
    // Constructor for runtime dimensions:
    template <bool Dyn = Dynamic>
    unchecked_reference(const void *data, const ssize_t *shape, const ssize_t *strides, enable_if_t<Dyn, ssize_t> dims)
    : data_{reinterpret_cast<const unsigned char *>(data)}, shape_{shape}, strides_{strides}, dims_{dims} {}

public:
    /**
     * Unchecked const reference access to data at the given indices. For a compile-time known
     * number of dimensions, this requires the correct number of arguments; for run-time
     * dimensionality, this is not checked (and so is up to the caller to use safely).
     */
    template <typename... Ix> const T &operator()(Ix... index) const {
        static_assert(ssize_t{sizeof...(Ix)} == Dims || Dynamic,
                "Invalid number of indices for unchecked array reference");
        return *reinterpret_cast<const T *>(data_ + byte_offset_unsafe(strides_, ssize_t(index)...));
    }
    /**
     * Unchecked const reference access to data; this operator only participates if the reference
     * is to a 1-dimensional array. When present, this is exactly equivalent to `obj(index)`.
     */
    template <ssize_t D = Dims, typename = enable_if_t<D == 1 || Dynamic>>
    const T &operator[](ssize_t index) const { return operator()(index); }

    /// Pointer access to the data at the given indices.
    template <typename... Ix> const T *data(Ix... ix) const { return &operator()(ssize_t(ix)...); }

    /// Returns the item size, i.e. sizeof(T)
    constexpr static ssize_t itemsize() { return sizeof(T); }

    /// Returns the shape (i.e. size) of dimension `dim`
    ssize_t shape(ssize_t dim) const { return shape_[(size_t) dim]; }

    /// Returns the number of dimensions of the array
    ssize_t ndim() const { return dims_; }

    /// Returns the total number of elements in the referenced array, i.e. the product of the shapes
    template <bool Dyn = Dynamic>
    enable_if_t<!Dyn, ssize_t> size() const {
        return std::accumulate(shape_.begin(), shape_.end(), (ssize_t) 1, std::multiplies<ssize_t>());
    }
    template <bool Dyn = Dynamic>
    enable_if_t<Dyn, ssize_t> size() const {
        return std::accumulate(shape_, shape_ + ndim(), (ssize_t) 1, std::multiplies<ssize_t>());
    }

    /// Returns the total number of bytes used by the referenced data. Note that the actual span in
    /// memory may be larger if the referenced array has non-contiguous strides (e.g. for a slice).
    ssize_t nbytes() const {
        return size() * itemsize();
    }
};

template <typename T, ssize_t Dims>
class unchecked_mutable_reference : public unchecked_reference<T, Dims> {
    friend class pybind11::array;
    using ConstBase = unchecked_reference<T, Dims>;
    using ConstBase::ConstBase;
    using ConstBase::Dynamic;
public:
    /// Mutable, unchecked access to data at the given indices.
    template <typename... Ix> T& operator()(Ix... index) {
        static_assert(ssize_t{sizeof...(Ix)} == Dims || Dynamic,
                "Invalid number of indices for unchecked array reference");
        return const_cast<T &>(ConstBase::operator()(index...));
    }
    /**
     * Mutable, unchecked access to data at the given index; this operator only participates if the
     * reference is to a 1-dimensional array (or has runtime dimensions). When present, this is
     * exactly equivalent to `obj(index)`.
     */
    template <ssize_t D = Dims, typename = enable_if_t<D == 1 || Dynamic>>
    T &operator[](ssize_t index) { return operator()(index); }

    /// Mutable pointer access to the data at the given indices.
    template <typename... Ix> T *mutable_data(Ix... ix) { return &operator()(ssize_t(ix)...); }
};

template <typename T, ssize_t Dim>
struct type_caster<unchecked_reference<T, Dim>> {
    static_assert(Dim == 0 && Dim > 0 /* always fail */, "unchecked array proxy object is not castable");
};
template <typename T, ssize_t Dim>
struct type_caster<unchecked_mutable_reference<T, Dim>> : type_caster<unchecked_reference<T, Dim>> {};

NAMESPACE_END(detail)
class dtype : public object {
public:
    PYBIND11_OBJECT_DEFAULT(dtype, object, detail::npy_api::get().PyArrayDescr_Check_);

    explicit dtype(const buffer_info &info) {
        dtype descr(_dtype_from_pep3118()(PYBIND11_STR_TYPE(info.format)));
        // If info.itemsize == 0, use the value calculated from the format string
        m_ptr = descr.strip_padding(info.itemsize ? info.itemsize : descr.itemsize()).release().ptr();
    }

    explicit dtype(const std::string &format) {
        m_ptr = from_args(pybind11::str(format)).release().ptr();
    }

    dtype(const char *format) : dtype(std::string(format)) { }

    dtype(list names, list formats, list offsets, ssize_t itemsize) {
        dict args;
        args["names"] = names;
        args["formats"] = formats;
        args["offsets"] = offsets;
        args["itemsize"] = pybind11::int_(itemsize);
        m_ptr = from_args(args).release().ptr();
    }

    /// This is essentially the same as calling numpy.dtype(args) in Python.
    static dtype from_args(object args) {
        PyObject *ptr = nullptr;
        if (!detail::npy_api::get().PyArray_DescrConverter_(args.ptr(), &ptr) || !ptr)
            throw error_already_set();
        return reinterpret_steal<dtype>(ptr);
    }

    /// Return dtype associated with a C++ type.
    template <typename T> static dtype of() {
        return detail::npy_format_descriptor<typename std::remove_cv<T>::type>::dtype();
    }

    /// Size of the data type in bytes.
    ssize_t itemsize() const {
        return detail::array_descriptor_proxy(m_ptr)->elsize;
    }

    /// Returns true for structured data types.
    bool has_fields() const {
        return detail::array_descriptor_proxy(m_ptr)->names != nullptr;
    }

    /// Single-character type code.
    char kind() const {
        return detail::array_descriptor_proxy(m_ptr)->kind;
    }

private:
    static object _dtype_from_pep3118() {
        static PyObject *obj = module::import("numpy.core._internal")
            .attr("_dtype_from_pep3118").cast<object>().release().ptr();
        return reinterpret_borrow<object>(obj);
    }

    dtype strip_padding(ssize_t itemsize) {
        // Recursively strip all void fields with empty names that are generated for
        // padding fields (as of NumPy v1.11).
        if (!has_fields())
            return *this;

        struct field_descr { PYBIND11_STR_TYPE name; object format; pybind11::int_ offset; };
        std::vector<field_descr> field_descriptors;

        for (auto field : attr("fields").attr("items")()) {
            auto spec = field.cast<tuple>();
            auto name = spec[0].cast<pybind11::str>();
            auto format = spec[1].cast<tuple>()[0].cast<dtype>();
            auto offset = spec[1].cast<tuple>()[1].cast<pybind11::int_>();
            if (!len(name) && format.kind() == 'V')
                continue;
            field_descriptors.push_back({(PYBIND11_STR_TYPE) name, format.strip_padding(format.itemsize()), offset});
        }

        std::sort(field_descriptors.begin(), field_descriptors.end(),
                  [](const field_descr& a, const field_descr& b) {
                      return a.offset.cast<int>() < b.offset.cast<int>();
                  });

        list names, formats, offsets;
        for (auto& descr : field_descriptors) {
            names.append(descr.name);
            formats.append(descr.format);
            offsets.append(descr.offset);
        }
        return dtype(names, formats, offsets, itemsize);
    }
};
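
// Illustrative sketch (not part of the original header), assuming `namespace py = pybind11;`:
// the constructors above mirror numpy.dtype() in Python, e.g.
//
//     auto d1 = py::dtype("float64");            // from a format / typestr string
//     auto d2 = py::dtype::of<std::int32_t>();   // from a C++ type
//     // structured dtype from parallel name/format/offset lists:
//     py::list names, formats, offsets;
//     names.append("x"); formats.append(py::dtype::of<double>());       offsets.append(py::int_(0));
//     names.append("y"); formats.append(py::dtype::of<std::int32_t>()); offsets.append(py::int_(8));
//     auto d3 = py::dtype(names, formats, offsets, /* itemsize = */ 16);
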
class array : public buffer {
public:
    PYBIND11_OBJECT_CVT(array, buffer, detail::npy_api::get().PyArray_Check_, raw_array)

    enum {
        c_style = detail::npy_api::NPY_ARRAY_C_CONTIGUOUS_,
        f_style = detail::npy_api::NPY_ARRAY_F_CONTIGUOUS_,
        forcecast = detail::npy_api::NPY_ARRAY_FORCECAST_
    };

    array() : array({{0}}, static_cast<const double *>(nullptr)) {}

    using ShapeContainer = detail::any_container<ssize_t>;
    using StridesContainer = detail::any_container<ssize_t>;

    // Constructs an array taking shape/strides from arbitrary container types
    array(const pybind11::dtype &dt, ShapeContainer shape, StridesContainer strides,
          const void *ptr = nullptr, handle base = handle()) {

        if (strides->empty())
            *strides = c_strides(*shape, dt.itemsize());

        auto ndim = shape->size();
        if (ndim != strides->size())
            pybind11_fail("NumPy: shape ndim doesn't match strides ndim");
        auto descr = dt;

        int flags = 0;
        if (base && ptr) {
            if (isinstance<array>(base))
                /* Copy flags from base (except ownership bit) */
                flags = reinterpret_borrow<array>(base).flags() & ~detail::npy_api::NPY_ARRAY_OWNDATA_;
            else
                /* Writable by default, easy to downgrade later on if needed */
                flags = detail::npy_api::NPY_ARRAY_WRITEABLE_;
        }

        auto &api = detail::npy_api::get();
        auto tmp = reinterpret_steal<object>(api.PyArray_NewFromDescr_(
            api.PyArray_Type_, descr.release().ptr(), (int) ndim, shape->data(), strides->data(),
            const_cast<void *>(ptr), flags, nullptr));
        if (!tmp)
            throw error_already_set();
        if (ptr) {
            if (base) {
                api.PyArray_SetBaseObject_(tmp.ptr(), base.inc_ref().ptr());
            } else {
                tmp = reinterpret_steal<object>(api.PyArray_NewCopy_(tmp.ptr(), -1 /* any order */));
            }
        }
        m_ptr = tmp.release().ptr();
    }

    array(const pybind11::dtype &dt, ShapeContainer shape, const void *ptr = nullptr, handle base = handle())
        : array(dt, std::move(shape), {}, ptr, base) { }

    template <typename T, typename = detail::enable_if_t<std::is_integral<T>::value && !std::is_same<bool, T>::value>>
    array(const pybind11::dtype &dt, T count, const void *ptr = nullptr, handle base = handle())
        : array(dt, {{count}}, ptr, base) { }

    template <typename T>
    array(ShapeContainer shape, StridesContainer strides, const T *ptr, handle base = handle())
        : array(pybind11::dtype::of<T>(), std::move(shape), std::move(strides), ptr, base) { }

    template <typename T>
    array(ShapeContainer shape, const T *ptr, handle base = handle())
        : array(std::move(shape), {}, ptr, base) { }

    template <typename T>
    explicit array(ssize_t count, const T *ptr, handle base = handle()) : array({count}, {}, ptr, base) { }

    explicit array(const buffer_info &info)
        : array(pybind11::dtype(info), info.shape, info.strides, info.ptr) { }
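
    // Illustrative sketch (not part of the original header), assuming `namespace py = pybind11;`:
    // constructing an array that copies an existing C buffer, and one that wraps it without
    // copying by passing a `base` handle that keeps the owning object alive:
    //
    //     std::vector<double> buf(12);
    //     py::array a1({3, 4}, buf.data());                        // no base given -> data is copied
    //     py::array a2({3, 4}, {4 * sizeof(double), sizeof(double)},
    //                  buf.data(), py::cast(owner));               // base given -> zero-copy view
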
    /// Array descriptor (dtype)
    pybind11::dtype dtype() const {
        return reinterpret_borrow<pybind11::dtype>(detail::array_proxy(m_ptr)->descr);
    }

    /// Total number of elements
    ssize_t size() const {
        return std::accumulate(shape(), shape() + ndim(), (ssize_t) 1, std::multiplies<ssize_t>());
    }

    /// Byte size of a single element
    ssize_t itemsize() const {
        return detail::array_descriptor_proxy(detail::array_proxy(m_ptr)->descr)->elsize;
    }

    /// Total number of bytes
    ssize_t nbytes() const {
        return size() * itemsize();
    }

    /// Number of dimensions
    ssize_t ndim() const {
        return detail::array_proxy(m_ptr)->nd;
    }

    /// Base object
    object base() const {
        return reinterpret_borrow<object>(detail::array_proxy(m_ptr)->base);
    }

    /// Dimensions of the array
    const ssize_t* shape() const {
        return detail::array_proxy(m_ptr)->dimensions;
    }

    /// Dimension along a given axis
    ssize_t shape(ssize_t dim) const {
        if (dim >= ndim())
            fail_dim_check(dim, "invalid axis");
        return shape()[dim];
    }

    /// Strides of the array
    const ssize_t* strides() const {
        return detail::array_proxy(m_ptr)->strides;
    }

    /// Stride along a given axis
    ssize_t strides(ssize_t dim) const {
        if (dim >= ndim())
            fail_dim_check(dim, "invalid axis");
        return strides()[dim];
    }

    /// Return the NumPy array flags
    int flags() const {
        return detail::array_proxy(m_ptr)->flags;
    }

    /// If set, the array is writeable (otherwise the buffer is read-only)
    bool writeable() const {
        return detail::check_flags(m_ptr, detail::npy_api::NPY_ARRAY_WRITEABLE_);
    }

    /// If set, the array owns the data (will be freed when the array is deleted)
    bool owndata() const {
        return detail::check_flags(m_ptr, detail::npy_api::NPY_ARRAY_OWNDATA_);
    }

    /// Pointer to the contained data. If index is not provided, points to the
    /// beginning of the buffer. May throw if the index would lead to out of bounds access.
    template<typename... Ix> const void* data(Ix... index) const {
        return static_cast<const void *>(detail::array_proxy(m_ptr)->data + offset_at(index...));
    }

    /// Mutable pointer to the contained data. If index is not provided, points to the
    /// beginning of the buffer. May throw if the index would lead to out of bounds access.
    /// May throw if the array is not writeable.
    template<typename... Ix> void* mutable_data(Ix... index) {
        check_writeable();
        return static_cast<void *>(detail::array_proxy(m_ptr)->data + offset_at(index...));
    }

    /// Byte offset from beginning of the array to a given index (full or partial).
    /// May throw if the index would lead to out of bounds access.
    template<typename... Ix> ssize_t offset_at(Ix... index) const {
        if ((ssize_t) sizeof...(index) > ndim())
            fail_dim_check(sizeof...(index), "too many indices for an array");
        return byte_offset(ssize_t(index)...);
    }

    ssize_t offset_at() const { return 0; }

    /// Item count from beginning of the array to a given index (full or partial).
    /// May throw if the index would lead to out of bounds access.
    template<typename... Ix> ssize_t index_at(Ix... index) const {
        return offset_at(index...) / itemsize();
    }

    /**
     * Returns a proxy object that provides access to the array's data without bounds or
     * dimensionality checking. Will throw if the array is missing the `writeable` flag. Use with
     * care: the array must not be destroyed or reshaped for the duration of the returned object,
     * and the caller must take care not to access invalid dimensions or dimension indices.
     */
    template <typename T, ssize_t Dims = -1> detail::unchecked_mutable_reference<T, Dims> mutable_unchecked() & {
        if (Dims >= 0 && ndim() != Dims)
            throw std::domain_error("array has incorrect number of dimensions: " + std::to_string(ndim()) +
                                    "; expected " + std::to_string(Dims));
        return detail::unchecked_mutable_reference<T, Dims>(mutable_data(), shape(), strides(), ndim());
    }
    /**
     * Returns a proxy object that provides const access to the array's data without bounds or
     * dimensionality checking. Unlike `mutable_unchecked()`, this does not require that the
     * underlying array have the `writeable` flag. Use with care: the array must not be destroyed or
     * reshaped for the duration of the returned object, and the caller must take care not to access
     * invalid dimensions or dimension indices.
     */
    template <typename T, ssize_t Dims = -1> detail::unchecked_reference<T, Dims> unchecked() const & {
        if (Dims >= 0 && ndim() != Dims)
            throw std::domain_error("array has incorrect number of dimensions: " + std::to_string(ndim()) +
                                    "; expected " + std::to_string(Dims));
        return detail::unchecked_reference<T, Dims>(data(), shape(), strides(), ndim());
    }
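
    // Illustrative sketch (not part of the original header), assuming `namespace py = pybind11;`:
    //
    //     py::array_t<double> a({3, 4});
    //     auto r = a.mutable_unchecked<2>();          // compile-time 2-D proxy, no bounds checks
    //     for (ssize_t i = 0; i < r.shape(0); i++)
    //         for (ssize_t j = 0; j < r.shape(1); j++)
    //             r(i, j) = double(i) * 10 + double(j);
    //     auto c = a.unchecked<2>();                  // read-only counterpart: c(1, 2)
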
    /// Return a new view with all of the dimensions of length 1 removed
    array squeeze() {
        auto& api = detail::npy_api::get();
        return reinterpret_steal<array>(api.PyArray_Squeeze_(m_ptr));
    }

    /// Resize array to given shape
    /// If refcheck is true and more than one reference exists to this array
    /// then resize will succeed only if it amounts to a reshape, i.e. the original size doesn't change
    void resize(ShapeContainer new_shape, bool refcheck = true) {
        detail::npy_api::PyArray_Dims d = {
            new_shape->data(), int(new_shape->size())
        };
        // try to resize; the ordering parameter is set to -1 because it is not used anyway
        object new_array = reinterpret_steal<object>(
            detail::npy_api::get().PyArray_Resize_(m_ptr, &d, int(refcheck), -1)
        );
        if (!new_array) throw error_already_set();
        if (isinstance<array>(new_array)) { *this = std::move(new_array); }
    }

    /// Ensure that the argument is a NumPy array
    /// In case of an error, nullptr is returned and the Python error is cleared.
    static array ensure(handle h, int ExtraFlags = 0) {
        auto result = reinterpret_steal<array>(raw_array(h.ptr(), ExtraFlags));
        if (!result)
            PyErr_Clear();
        return result;
    }
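
    // Illustrative sketch (not part of the original header): `ensure()` is handy when a bound
    // function accepts an arbitrary Python object and wants a NumPy array view of it, e.g.
    //
    //     py::array arr = py::array::ensure(obj, py::array::c_style | py::array::forcecast);
    //     if (!arr) { /* not convertible; the Python error has already been cleared */ }
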
protected:
    template<typename, typename> friend struct detail::npy_format_descriptor;

    void fail_dim_check(ssize_t dim, const std::string& msg) const {
        throw index_error(msg + ": " + std::to_string(dim) +
                          " (ndim = " + std::to_string(ndim()) + ")");
    }

    template<typename... Ix> ssize_t byte_offset(Ix... index) const {
        check_dimensions(index...);
        return detail::byte_offset_unsafe(strides(), ssize_t(index)...);
    }

    void check_writeable() const {
        if (!writeable())
            throw std::domain_error("array is not writeable");
    }

    // Default, C-style strides
    static std::vector<ssize_t> c_strides(const std::vector<ssize_t> &shape, ssize_t itemsize) {
        auto ndim = shape.size();
        std::vector<ssize_t> strides(ndim, itemsize);
        if (ndim > 0)
            for (size_t i = ndim - 1; i > 0; --i)
                strides[i - 1] = strides[i] * shape[i];
        return strides;
    }

    // F-style strides; default when constructing an array_t with `ExtraFlags & f_style`
    static std::vector<ssize_t> f_strides(const std::vector<ssize_t> &shape, ssize_t itemsize) {
        auto ndim = shape.size();
        std::vector<ssize_t> strides(ndim, itemsize);
        for (size_t i = 1; i < ndim; ++i)
            strides[i] = strides[i - 1] * shape[i - 1];
        return strides;
    }
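
    // Worked example (illustrative, not part of the original header): for shape {3, 4, 5} and
    // itemsize 8, c_strides yields {160, 40, 8} (row-major: last axis varies fastest), while
    // f_strides yields {8, 24, 96} (column-major: first axis varies fastest).
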
    template<typename... Ix> void check_dimensions(Ix... index) const {
        check_dimensions_impl(ssize_t(0), shape(), ssize_t(index)...);
    }

    void check_dimensions_impl(ssize_t, const ssize_t*) const { }

    template<typename... Ix> void check_dimensions_impl(ssize_t axis, const ssize_t* shape, ssize_t i, Ix... index) const {
        if (i >= *shape) {
            throw index_error(std::string("index ") + std::to_string(i) +
                              " is out of bounds for axis " + std::to_string(axis) +
                              " with size " + std::to_string(*shape));
        }
        check_dimensions_impl(axis + 1, shape + 1, index...);
    }

    /// Create array from any object -- always returns a new reference
    static PyObject *raw_array(PyObject *ptr, int ExtraFlags = 0) {
        if (ptr == nullptr) {
            PyErr_SetString(PyExc_ValueError, "cannot create a pybind11::array from a nullptr");
            return nullptr;
        }
        return detail::npy_api::get().PyArray_FromAny_(
            ptr, nullptr, 0, 0, detail::npy_api::NPY_ARRAY_ENSUREARRAY_ | ExtraFlags, nullptr);
    }
};
template <typename T, int ExtraFlags = array::forcecast> class array_t : public array {
private:
    struct private_ctor {};
    // Delegating constructor needed when both moving and accessing in the same constructor
    array_t(private_ctor, ShapeContainer &&shape, StridesContainer &&strides, const T *ptr, handle base)
        : array(std::move(shape), std::move(strides), ptr, base) {}
public:
    static_assert(!detail::array_info<T>::is_array, "Array types cannot be used with array_t");

    using value_type = T;

    array_t() : array(0, static_cast<const T *>(nullptr)) {}
    array_t(handle h, borrowed_t) : array(h, borrowed_t{}) { }
    array_t(handle h, stolen_t) : array(h, stolen_t{}) { }

    PYBIND11_DEPRECATED("Use array_t<T>::ensure() instead")
    array_t(handle h, bool is_borrowed) : array(raw_array_t(h.ptr()), stolen_t{}) {
        if (!m_ptr) PyErr_Clear();
        if (!is_borrowed) Py_XDECREF(h.ptr());
    }

    array_t(const object &o) : array(raw_array_t(o.ptr()), stolen_t{}) {
        if (!m_ptr) throw error_already_set();
    }

    explicit array_t(const buffer_info& info) : array(info) { }

    array_t(ShapeContainer shape, StridesContainer strides, const T *ptr = nullptr, handle base = handle())
        : array(std::move(shape), std::move(strides), ptr, base) { }

    explicit array_t(ShapeContainer shape, const T *ptr = nullptr, handle base = handle())
        : array_t(private_ctor{}, std::move(shape),
                  ExtraFlags & f_style ? f_strides(*shape, itemsize()) : c_strides(*shape, itemsize()),
                  ptr, base) { }

    explicit array_t(size_t count, const T *ptr = nullptr, handle base = handle())
        : array({count}, {}, ptr, base) { }

    constexpr ssize_t itemsize() const {
        return sizeof(T);
    }

    template<typename... Ix> ssize_t index_at(Ix... index) const {
        return offset_at(index...) / itemsize();
    }

    template<typename... Ix> const T* data(Ix... index) const {
        return static_cast<const T*>(array::data(index...));
    }

    template<typename... Ix> T* mutable_data(Ix... index) {
        return static_cast<T*>(array::mutable_data(index...));
    }

    // Reference to element at a given index
    template<typename... Ix> const T& at(Ix... index) const {
        if (sizeof...(index) != ndim())
            fail_dim_check(sizeof...(index), "index dimension mismatch");
        return *(static_cast<const T*>(array::data()) + byte_offset(ssize_t(index)...) / itemsize());
    }

    // Mutable reference to element at a given index
    template<typename... Ix> T& mutable_at(Ix... index) {
        if (sizeof...(index) != ndim())
            fail_dim_check(sizeof...(index), "index dimension mismatch");
        return *(static_cast<T*>(array::mutable_data()) + byte_offset(ssize_t(index)...) / itemsize());
    }

    /**
     * Returns a proxy object that provides access to the array's data without bounds or
     * dimensionality checking. Will throw if the array is missing the `writeable` flag. Use with
     * care: the array must not be destroyed or reshaped for the duration of the returned object,
     * and the caller must take care not to access invalid dimensions or dimension indices.
     */
    template <ssize_t Dims = -1> detail::unchecked_mutable_reference<T, Dims> mutable_unchecked() & {
        return array::mutable_unchecked<T, Dims>();
    }
    /**
     * Returns a proxy object that provides const access to the array's data without bounds or
     * dimensionality checking. Unlike `mutable_unchecked()`, this does not require that the
     * underlying array have the `writeable` flag. Use with care: the array must not be destroyed or
     * reshaped for the duration of the returned object, and the caller must take care not to access
     * invalid dimensions or dimension indices.
     */
    template <ssize_t Dims = -1> detail::unchecked_reference<T, Dims> unchecked() const & {
        return array::unchecked<T, Dims>();
    }
    /// Ensure that the argument is a NumPy array of the correct dtype (and if not, try to convert
    /// it). In case of an error, nullptr is returned and the Python error is cleared.
    static array_t ensure(handle h) {
        auto result = reinterpret_steal<array_t>(raw_array_t(h.ptr()));
        if (!result)
            PyErr_Clear();
        return result;
    }

    static bool check_(handle h) {
        const auto &api = detail::npy_api::get();
        return api.PyArray_Check_(h.ptr())
               && api.PyArray_EquivTypes_(detail::array_proxy(h.ptr())->descr, dtype::of<T>().ptr());
    }

protected:
    /// Create array from any object -- always returns a new reference
    static PyObject *raw_array_t(PyObject *ptr) {
        if (ptr == nullptr) {
            PyErr_SetString(PyExc_ValueError, "cannot create a pybind11::array_t from a nullptr");
            return nullptr;
        }
        return detail::npy_api::get().PyArray_FromAny_(
            ptr, dtype::of<T>().release().ptr(), 0, 0,
            detail::npy_api::NPY_ARRAY_ENSUREARRAY_ | ExtraFlags, nullptr);
    }
};
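
// Illustrative sketch (not part of the original header), assuming `namespace py = pybind11;`:
// a typical bound function that takes and returns typed arrays; `forcecast` (the default
// ExtraFlags) lets pybind11 convert compatible inputs such as Python lists:
//
//     py::array_t<double> scale(py::array_t<double> in, double factor) {
//         py::array_t<double> out(in.size());
//         auto r = in.unchecked<1>();
//         auto w = out.mutable_unchecked<1>();
//         for (ssize_t i = 0; i < r.shape(0); i++)
//             w(i) = r(i) * factor;
//         return out;
//     }
//     // m.def("scale", &scale);
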
template <typename T>
struct format_descriptor<T, detail::enable_if_t<detail::is_pod_struct<T>::value>> {
    static std::string format() {
        return detail::npy_format_descriptor<typename std::remove_cv<T>::type>::format();
    }
};

template <size_t N> struct format_descriptor<char[N]> {
    static std::string format() { return std::to_string(N) + "s"; }
};
template <size_t N> struct format_descriptor<std::array<char, N>> {
    static std::string format() { return std::to_string(N) + "s"; }
};

template <typename T>
struct format_descriptor<T, detail::enable_if_t<std::is_enum<T>::value>> {
    static std::string format() {
        return format_descriptor<
            typename std::remove_cv<typename std::underlying_type<T>::type>::type>::format();
    }
};

template <typename T>
struct format_descriptor<T, detail::enable_if_t<detail::array_info<T>::is_array>> {
    static std::string format() {
        using namespace detail;
        static constexpr auto extents = _("(") + array_info<T>::extents + _(")");
        return extents.text + format_descriptor<remove_all_extents_t<T>>::format();
    }
};
NAMESPACE_BEGIN(detail)
template <typename T, int ExtraFlags>
struct pyobject_caster<array_t<T, ExtraFlags>> {
    using type = array_t<T, ExtraFlags>;

    bool load(handle src, bool convert) {
        if (!convert && !type::check_(src))
            return false;
        value = type::ensure(src);
        return static_cast<bool>(value);
    }

    static handle cast(const handle &src, return_value_policy /* policy */, handle /* parent */) {
        return src.inc_ref();
    }
    PYBIND11_TYPE_CASTER(type, handle_type_name<type>::name);
};

template <typename T>
struct compare_buffer_info<T, detail::enable_if_t<detail::is_pod_struct<T>::value>> {
    static bool compare(const buffer_info& b) {
        return npy_api::get().PyArray_EquivTypes_(dtype::of<T>().ptr(), dtype(b).ptr());
    }
};

template <typename T, typename = void>
struct npy_format_descriptor_name;

template <typename T>
struct npy_format_descriptor_name<T, enable_if_t<std::is_integral<T>::value>> {
    static constexpr auto name = _<std::is_same<T, bool>::value>(
        _("bool"), _<std::is_signed<T>::value>("int", "uint") + _<sizeof(T)*8>()
    );
};

template <typename T>
struct npy_format_descriptor_name<T, enable_if_t<std::is_floating_point<T>::value>> {
    static constexpr auto name = _<std::is_same<T, float>::value || std::is_same<T, double>::value>(
        _("float") + _<sizeof(T)*8>(), _("longdouble")
    );
};

template <typename T>
struct npy_format_descriptor_name<T, enable_if_t<is_complex<T>::value>> {
    static constexpr auto name = _<std::is_same<typename T::value_type, float>::value
                                   || std::is_same<typename T::value_type, double>::value>(
        _("complex") + _<sizeof(typename T::value_type)*16>(), _("longcomplex")
    );
};

template <typename T>
struct npy_format_descriptor<T, enable_if_t<satisfies_any_of<T, std::is_arithmetic, is_complex>::value>>
    : npy_format_descriptor_name<T> {
private:
    // NB: the order here must match the one in common.h
    constexpr static const int values[15] = {
        npy_api::NPY_BOOL_,
        npy_api::NPY_BYTE_,   npy_api::NPY_UBYTE_,   npy_api::NPY_SHORT_,    npy_api::NPY_USHORT_,
        npy_api::NPY_INT_,    npy_api::NPY_UINT_,    npy_api::NPY_LONGLONG_, npy_api::NPY_ULONGLONG_,
        npy_api::NPY_FLOAT_,  npy_api::NPY_DOUBLE_,  npy_api::NPY_LONGDOUBLE_,
        npy_api::NPY_CFLOAT_, npy_api::NPY_CDOUBLE_, npy_api::NPY_CLONGDOUBLE_
    };

public:
    static constexpr int value = values[detail::is_fmt_numeric<T>::index];

    static pybind11::dtype dtype() {
        if (auto ptr = npy_api::get().PyArray_DescrFromType_(value))
            return reinterpret_borrow<pybind11::dtype>(ptr);
        pybind11_fail("Unsupported buffer format!");
    }
};

#define PYBIND11_DECL_CHAR_FMT \
    static constexpr auto name = _("S") + _<N>(); \
    static pybind11::dtype dtype() { return pybind11::dtype(std::string("S") + std::to_string(N)); }
template <size_t N> struct npy_format_descriptor<char[N]> { PYBIND11_DECL_CHAR_FMT };
template <size_t N> struct npy_format_descriptor<std::array<char, N>> { PYBIND11_DECL_CHAR_FMT };
#undef PYBIND11_DECL_CHAR_FMT

template<typename T> struct npy_format_descriptor<T, enable_if_t<array_info<T>::is_array>> {
private:
    using base_descr = npy_format_descriptor<typename array_info<T>::type>;
public:
    static_assert(!array_info<T>::is_empty, "Zero-sized arrays are not supported");

    static constexpr auto name = _("(") + array_info<T>::extents + _(")") + base_descr::name;
    static pybind11::dtype dtype() {
        list shape;
        array_info<T>::append_extents(shape);
        return pybind11::dtype::from_args(pybind11::make_tuple(base_descr::dtype(), shape));
    }
};

template<typename T> struct npy_format_descriptor<T, enable_if_t<std::is_enum<T>::value>> {
private:
    using base_descr = npy_format_descriptor<typename std::underlying_type<T>::type>;
public:
    static constexpr auto name = base_descr::name;
    static pybind11::dtype dtype() { return base_descr::dtype(); }
};

struct field_descriptor {
    const char *name;
    ssize_t offset;
    ssize_t size;
    std::string format;
    dtype descr;
};
inline PYBIND11_NOINLINE void register_structured_dtype(
    any_container<field_descriptor> fields,
    const std::type_info& tinfo, ssize_t itemsize,
    bool (*direct_converter)(PyObject *, void *&)) {

    auto& numpy_internals = get_numpy_internals();
    if (numpy_internals.get_type_info(tinfo, false))
        pybind11_fail("NumPy: dtype is already registered");

    list names, formats, offsets;
    for (auto field : *fields) {
        if (!field.descr)
            pybind11_fail(std::string("NumPy: unsupported field dtype: `") +
                          field.name + "` @ " + tinfo.name());
        names.append(PYBIND11_STR_TYPE(field.name));
        formats.append(field.descr);
        offsets.append(pybind11::int_(field.offset));
    }
    auto dtype_ptr = pybind11::dtype(names, formats, offsets, itemsize).release().ptr();

    // There is an existing bug in NumPy (as of v1.11): trailing bytes are
    // not encoded explicitly into the format string. This will supposedly
    // get fixed in v1.12; for further details, see these:
    // - https://github.com/numpy/numpy/issues/7797
    // - https://github.com/numpy/numpy/pull/7798
    // Because of this, we won't use numpy's logic to generate buffer format
    // strings and will just do it ourselves.
    std::vector<field_descriptor> ordered_fields(std::move(fields));
    std::sort(ordered_fields.begin(), ordered_fields.end(),
              [](const field_descriptor &a, const field_descriptor &b) { return a.offset < b.offset; });
    ssize_t offset = 0;
    std::ostringstream oss;
    // mark the structure as unaligned with '^', because numpy and C++ don't
    // always agree about alignment (particularly for complex), and we're
    // explicitly listing all our padding. This depends on none of the fields
    // overriding the endianness. Putting the ^ in front of individual fields
    // isn't guaranteed to work due to https://github.com/numpy/numpy/issues/9049
    oss << "^T{";
    for (auto& field : ordered_fields) {
        if (field.offset > offset)
            oss << (field.offset - offset) << 'x';
        oss << field.format << ':' << field.name << ':';
        offset = field.offset + field.size;
    }
    if (itemsize > offset)
        oss << (itemsize - offset) << 'x';
    oss << '}';
    auto format_str = oss.str();

    // Sanity check: verify that NumPy properly parses our buffer format string
    auto& api = npy_api::get();
    auto arr = array(buffer_info(nullptr, itemsize, format_str, 1));
    if (!api.PyArray_EquivTypes_(dtype_ptr, arr.dtype().ptr()))
        pybind11_fail("NumPy: invalid buffer descriptor!");

    auto tindex = std::type_index(tinfo);
    numpy_internals.registered_dtypes[tindex] = { dtype_ptr, format_str };
    get_internals().direct_conversions[tindex].push_back(direct_converter);
}
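
// Worked example (illustrative, not part of the original header): for a hypothetical
//     struct Rec { std::int32_t x; /* 4 bytes of padding */ double y; };   // sizeof(Rec) == 16
// the loop above produces the buffer format string "^T{i:x:4xd:y:}" -- '^' marks the struct
// as unaligned/packed, '4x' encodes the explicit padding, and the registered dtype is then
// checked against NumPy's own parse of that string.
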
template <typename T, typename SFINAE> struct npy_format_descriptor {
    static_assert(is_pod_struct<T>::value, "Attempt to use a non-POD or unimplemented POD type as a numpy dtype");

    static constexpr auto name = make_caster<T>::name;

    static pybind11::dtype dtype() {
        return reinterpret_borrow<pybind11::dtype>(dtype_ptr());
    }

    static std::string format() {
        static auto format_str = get_numpy_internals().get_type_info<T>(true)->format_str;
        return format_str;
    }

    static void register_dtype(any_container<field_descriptor> fields) {
        register_structured_dtype(std::move(fields), typeid(typename std::remove_cv<T>::type),
                                  sizeof(T), &direct_converter);
    }

private:
    static PyObject* dtype_ptr() {
        static PyObject* ptr = get_numpy_internals().get_type_info<T>(true)->dtype_ptr;
        return ptr;
    }

    static bool direct_converter(PyObject *obj, void*& value) {
        auto& api = npy_api::get();
        if (!PyObject_TypeCheck(obj, api.PyVoidArrType_Type_))
            return false;
        if (auto descr = reinterpret_steal<object>(api.PyArray_DescrFromScalar_(obj))) {
            if (api.PyArray_EquivTypes_(dtype_ptr(), descr.ptr())) {
                value = ((PyVoidScalarObject_Proxy *) obj)->obval;
                return true;
            }
        }
        return false;
    }
};
  993. #ifdef __CLION_IDE__ // replace heavy macro with dummy code for the IDE (doesn't affect code)
  994. # define PYBIND11_NUMPY_DTYPE(Type, ...) ((void)0)
  995. # define PYBIND11_NUMPY_DTYPE_EX(Type, ...) ((void)0)
  996. #else
  997. #define PYBIND11_FIELD_DESCRIPTOR_EX(T, Field, Name) \
  998. ::pybind11::detail::field_descriptor { \
  999. Name, offsetof(T, Field), sizeof(decltype(std::declval<T>().Field)), \
  1000. ::pybind11::format_descriptor<decltype(std::declval<T>().Field)>::format(), \
  1001. ::pybind11::detail::npy_format_descriptor<decltype(std::declval<T>().Field)>::dtype() \
  1002. }
  1003. // Extract name, offset and format descriptor for a struct field
  1004. #define PYBIND11_FIELD_DESCRIPTOR(T, Field) PYBIND11_FIELD_DESCRIPTOR_EX(T, Field, #Field)
  1005. // The main idea of this macro is borrowed from https://github.com/swansontec/map-macro
  1006. // (C) William Swanson, Paul Fultz
  1007. #define PYBIND11_EVAL0(...) __VA_ARGS__
  1008. #define PYBIND11_EVAL1(...) PYBIND11_EVAL0 (PYBIND11_EVAL0 (PYBIND11_EVAL0 (__VA_ARGS__)))
  1009. #define PYBIND11_EVAL2(...) PYBIND11_EVAL1 (PYBIND11_EVAL1 (PYBIND11_EVAL1 (__VA_ARGS__)))
  1010. #define PYBIND11_EVAL3(...) PYBIND11_EVAL2 (PYBIND11_EVAL2 (PYBIND11_EVAL2 (__VA_ARGS__)))
  1011. #define PYBIND11_EVAL4(...) PYBIND11_EVAL3 (PYBIND11_EVAL3 (PYBIND11_EVAL3 (__VA_ARGS__)))
  1012. #define PYBIND11_EVAL(...) PYBIND11_EVAL4 (PYBIND11_EVAL4 (PYBIND11_EVAL4 (__VA_ARGS__)))
  1013. #define PYBIND11_MAP_END(...)
  1014. #define PYBIND11_MAP_OUT
  1015. #define PYBIND11_MAP_COMMA ,
  1016. #define PYBIND11_MAP_GET_END() 0, PYBIND11_MAP_END
  1017. #define PYBIND11_MAP_NEXT0(test, next, ...) next PYBIND11_MAP_OUT
  1018. #define PYBIND11_MAP_NEXT1(test, next) PYBIND11_MAP_NEXT0 (test, next, 0)
  1019. #define PYBIND11_MAP_NEXT(test, next) PYBIND11_MAP_NEXT1 (PYBIND11_MAP_GET_END test, next)
  1020. #ifdef _MSC_VER // MSVC is not as eager to expand macros, hence this workaround
  1021. #define PYBIND11_MAP_LIST_NEXT1(test, next) \
  1022. PYBIND11_EVAL0 (PYBIND11_MAP_NEXT0 (test, PYBIND11_MAP_COMMA next, 0))
  1023. #else
  1024. #define PYBIND11_MAP_LIST_NEXT1(test, next) \
  1025. PYBIND11_MAP_NEXT0 (test, PYBIND11_MAP_COMMA next, 0)
  1026. #endif
  1027. #define PYBIND11_MAP_LIST_NEXT(test, next) \
  1028. PYBIND11_MAP_LIST_NEXT1 (PYBIND11_MAP_GET_END test, next)
  1029. #define PYBIND11_MAP_LIST0(f, t, x, peek, ...) \
  1030. f(t, x) PYBIND11_MAP_LIST_NEXT (peek, PYBIND11_MAP_LIST1) (f, t, peek, __VA_ARGS__)
  1031. #define PYBIND11_MAP_LIST1(f, t, x, peek, ...) \
  1032. f(t, x) PYBIND11_MAP_LIST_NEXT (peek, PYBIND11_MAP_LIST0) (f, t, peek, __VA_ARGS__)
  1033. // PYBIND11_MAP_LIST(f, t, a1, a2, ...) expands to f(t, a1), f(t, a2), ...
  1034. #define PYBIND11_MAP_LIST(f, t, ...) \
  1035. PYBIND11_EVAL (PYBIND11_MAP_LIST1 (f, t, __VA_ARGS__, (), 0))
  1036. #define PYBIND11_NUMPY_DTYPE(Type, ...) \
  1037. ::pybind11::detail::npy_format_descriptor<Type>::register_dtype \
  1038. (::std::vector<::pybind11::detail::field_descriptor> \
  1039. {PYBIND11_MAP_LIST (PYBIND11_FIELD_DESCRIPTOR, Type, __VA_ARGS__)})
#ifdef _MSC_VER
#define PYBIND11_MAP2_LIST_NEXT1(test, next) \
    PYBIND11_EVAL0 (PYBIND11_MAP_NEXT0 (test, PYBIND11_MAP_COMMA next, 0))
#else
#define PYBIND11_MAP2_LIST_NEXT1(test, next) \
    PYBIND11_MAP_NEXT0 (test, PYBIND11_MAP_COMMA next, 0)
#endif
#define PYBIND11_MAP2_LIST_NEXT(test, next) \
    PYBIND11_MAP2_LIST_NEXT1 (PYBIND11_MAP_GET_END test, next)
#define PYBIND11_MAP2_LIST0(f, t, x1, x2, peek, ...) \
    f(t, x1, x2) PYBIND11_MAP2_LIST_NEXT (peek, PYBIND11_MAP2_LIST1) (f, t, peek, __VA_ARGS__)
#define PYBIND11_MAP2_LIST1(f, t, x1, x2, peek, ...) \
    f(t, x1, x2) PYBIND11_MAP2_LIST_NEXT (peek, PYBIND11_MAP2_LIST0) (f, t, peek, __VA_ARGS__)
// PYBIND11_MAP2_LIST(f, t, a1, a2, ...) expands to f(t, a1, a2), f(t, a3, a4), ...
#define PYBIND11_MAP2_LIST(f, t, ...) \
    PYBIND11_EVAL (PYBIND11_MAP2_LIST1 (f, t, __VA_ARGS__, (), 0))
#define PYBIND11_NUMPY_DTYPE_EX(Type, ...) \
    ::pybind11::detail::npy_format_descriptor<Type>::register_dtype \
        (::std::vector<::pybind11::detail::field_descriptor> \
            {PYBIND11_MAP2_LIST (PYBIND11_FIELD_DESCRIPTOR_EX, Type, __VA_ARGS__)})
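// Usage sketch for the _EX variant (illustrative; `Vec3` as in the hypothetical example above):
// it takes (member, "python-side name") pairs, so the dtype field names can differ from the C++
// member names:
//     PYBIND11_NUMPY_DTYPE_EX(Vec3, x, "x0", y, "y0", z, "z0");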
#endif // __CLION_IDE__
template <class T>
using array_iterator = typename std::add_pointer<T>::type;

template <class T>
array_iterator<T> array_begin(const buffer_info& buffer) {
    return array_iterator<T>(reinterpret_cast<T*>(buffer.ptr));
}

template <class T>
array_iterator<T> array_end(const buffer_info& buffer) {
    return array_iterator<T>(reinterpret_cast<T*>(buffer.ptr) + buffer.size);
}
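// Steps a raw byte pointer through a single buffer. The constructor converts the buffer's
// absolute strides into per-dimension *relative* offsets (each one already compensating for the
// inner dimensions wrapping back to index 0), so that increment(dim) is a single pointer bump.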
class common_iterator {
public:
    using container_type = std::vector<ssize_t>;
    using value_type = container_type::value_type;
    using size_type = container_type::size_type;

    common_iterator() : p_ptr(0), m_strides() {}

    common_iterator(void* ptr, const container_type& strides, const container_type& shape)
        : p_ptr(reinterpret_cast<char*>(ptr)), m_strides(strides.size()) {
        m_strides.back() = static_cast<value_type>(strides.back());
        for (size_type i = m_strides.size() - 1; i != 0; --i) {
            size_type j = i - 1;
            value_type s = static_cast<value_type>(shape[i]);
            m_strides[j] = strides[j] + m_strides[i] - strides[i] * s;
        }
    }

    void increment(size_type dim) {
        p_ptr += m_strides[dim];
    }

    void* data() const {
        return p_ptr;
    }

private:
    char* p_ptr;
    container_type m_strides;
};
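// Advances N buffers in lockstep over a common (broadcast) output shape. Each buffer gets a
// common_iterator whose stride is zeroed along any dimension where the buffer's extent differs
// from the output extent (see init_common_iterator), so singleton axes are broadcast while
// full-size axes advance normally.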
template <size_t N> class multi_array_iterator {
public:
    using container_type = std::vector<ssize_t>;

    multi_array_iterator(const std::array<buffer_info, N> &buffers,
                         const container_type &shape)
        : m_shape(shape.size()), m_index(shape.size(), 0),
          m_common_iterator() {
        // Manual copy to avoid conversion warning if using std::copy
        for (size_t i = 0; i < shape.size(); ++i)
            m_shape[i] = shape[i];

        container_type strides(shape.size());
        for (size_t i = 0; i < N; ++i)
            init_common_iterator(buffers[i], shape, m_common_iterator[i], strides);
    }

    multi_array_iterator& operator++() {
        for (size_t j = m_index.size(); j != 0; --j) {
            size_t i = j - 1;
            if (++m_index[i] != m_shape[i]) {
                increment_common_iterator(i);
                break;
            } else {
                m_index[i] = 0;
            }
        }
        return *this;
    }

    template <size_t K, class T = void> T* data() const {
        return reinterpret_cast<T*>(m_common_iterator[K].data());
    }

private:
    using common_iter = common_iterator;

    void init_common_iterator(const buffer_info &buffer,
                              const container_type &shape,
                              common_iter &iterator,
                              container_type &strides) {
        auto buffer_shape_iter = buffer.shape.rbegin();
        auto buffer_strides_iter = buffer.strides.rbegin();
        auto shape_iter = shape.rbegin();
        auto strides_iter = strides.rbegin();

        while (buffer_shape_iter != buffer.shape.rend()) {
            if (*shape_iter == *buffer_shape_iter)
                *strides_iter = *buffer_strides_iter;
            else
                *strides_iter = 0;

            ++buffer_shape_iter;
            ++buffer_strides_iter;
            ++shape_iter;
            ++strides_iter;
        }

        std::fill(strides_iter, strides.rend(), 0);
        iterator = common_iter(buffer.ptr, strides, shape);
    }

    void increment_common_iterator(size_t dim) {
        for (auto &iter : m_common_iterator)
            iter.increment(dim);
    }

    container_type m_shape;
    container_type m_index;
    std::array<common_iter, N> m_common_iterator;
};
enum class broadcast_trivial { non_trivial, c_trivial, f_trivial };
// Populates the shape and number of dimensions for the set of buffers. Returns a broadcast_trivial
// enum value indicating whether the broadcast is "trivial"--that is, whether every buffer is either
// a singleton or a full-size, C-contiguous (`c_trivial`) or Fortran-contiguous (`f_trivial`) storage
// buffer; returns `non_trivial` otherwise.
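// Illustrative example (not exhaustive): input shapes {3, 1} and {4} conform and broadcast to an
// output shape of {3, 4}; a buffer of total size 1 is treated as a singleton regardless of the
// output shape.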
template <size_t N>
broadcast_trivial broadcast(const std::array<buffer_info, N> &buffers, ssize_t &ndim, std::vector<ssize_t> &shape) {
    ndim = std::accumulate(buffers.begin(), buffers.end(), ssize_t(0), [](ssize_t res, const buffer_info &buf) {
        return std::max(res, buf.ndim);
    });

    shape.clear();
    shape.resize((size_t) ndim, 1);

    // Figure out the output size, and make sure all input arrays conform (i.e. are either size 1 or
    // the full size).
    for (size_t i = 0; i < N; ++i) {
        auto res_iter = shape.rbegin();
        auto end = buffers[i].shape.rend();
        for (auto shape_iter = buffers[i].shape.rbegin(); shape_iter != end; ++shape_iter, ++res_iter) {
            const auto &dim_size_in = *shape_iter;
            auto &dim_size_out = *res_iter;

            // Each input dimension can either be 1 or `n`, but `n` values must match across buffers
            if (dim_size_out == 1)
                dim_size_out = dim_size_in;
            else if (dim_size_in != 1 && dim_size_in != dim_size_out)
                pybind11_fail("pybind11::vectorize: incompatible size/dimension of inputs!");
        }
    }

    bool trivial_broadcast_c = true;
    bool trivial_broadcast_f = true;
    for (size_t i = 0; i < N && (trivial_broadcast_c || trivial_broadcast_f); ++i) {
        if (buffers[i].size == 1)
            continue;

        // Require the same number of dimensions:
        if (buffers[i].ndim != ndim)
            return broadcast_trivial::non_trivial;

        // Require all dimensions be full-size:
        if (!std::equal(buffers[i].shape.cbegin(), buffers[i].shape.cend(), shape.cbegin()))
            return broadcast_trivial::non_trivial;

        // Check for C contiguity (but only if previous inputs were also C contiguous)
        if (trivial_broadcast_c) {
            ssize_t expect_stride = buffers[i].itemsize;
            auto end = buffers[i].shape.crend();
            for (auto shape_iter = buffers[i].shape.crbegin(), stride_iter = buffers[i].strides.crbegin();
                    trivial_broadcast_c && shape_iter != end; ++shape_iter, ++stride_iter) {
                if (expect_stride == *stride_iter)
                    expect_stride *= *shape_iter;
                else
                    trivial_broadcast_c = false;
            }
        }

        // Check for Fortran contiguity (if previous inputs were also F contiguous)
        if (trivial_broadcast_f) {
            ssize_t expect_stride = buffers[i].itemsize;
            auto end = buffers[i].shape.cend();
            for (auto shape_iter = buffers[i].shape.cbegin(), stride_iter = buffers[i].strides.cbegin();
                    trivial_broadcast_f && shape_iter != end; ++shape_iter, ++stride_iter) {
                if (expect_stride == *stride_iter)
                    expect_stride *= *shape_iter;
                else
                    trivial_broadcast_f = false;
            }
        }
    }

    return
        trivial_broadcast_c ? broadcast_trivial::c_trivial :
        trivial_broadcast_f ? broadcast_trivial::f_trivial :
        broadcast_trivial::non_trivial;
}
template <typename T>
struct vectorize_arg {
    static_assert(!std::is_rvalue_reference<T>::value, "Functions with rvalue reference arguments cannot be vectorized");
    // The wrapped function gets called with this type:
    using call_type = remove_reference_t<T>;
    // Is this a vectorized argument?
    static constexpr bool vectorize =
        satisfies_any_of<call_type, std::is_arithmetic, is_complex, std::is_pod>::value &&
        satisfies_none_of<call_type, std::is_pointer, std::is_array, is_std_array, std::is_enum>::value &&
        (!std::is_reference<T>::value ||
         (std::is_lvalue_reference<T>::value && std::is_const<call_type>::value));
    // Accept this type: an array for vectorized types, otherwise the type as-is:
    using type = conditional_t<vectorize, array_t<remove_cv_t<call_type>, array::forcecast>, T>;
};
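// For example (illustrative): a parameter declared as `double` or `const float &` satisfies the
// checks above and arrives as array_t<double> / array_t<float> (with forcecast), while something
// like `const std::string &` is not arithmetic/complex/POD and is therefore passed through as-is.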
template <typename Func, typename Return, typename... Args>
struct vectorize_helper {
private:
    static constexpr size_t N = sizeof...(Args);
    static constexpr size_t NVectorized = constexpr_sum(vectorize_arg<Args>::vectorize...);
    static_assert(NVectorized >= 1,
            "pybind11::vectorize(...) requires a function with at least one vectorizable argument");

public:
    template <typename T>
    explicit vectorize_helper(T &&f) : f(std::forward<T>(f)) { }

    object operator()(typename vectorize_arg<Args>::type... args) {
        return run(args...,
                   make_index_sequence<N>(),
                   select_indices<vectorize_arg<Args>::vectorize...>(),
                   make_index_sequence<NVectorized>());
    }

private:
    remove_reference_t<Func> f;
    // Internal compiler error in MSVC 19.16.27025.1 (Visual Studio 2017 15.9.4) when compiling
    // with the "/permissive-" flag and arg_call_types is manually inlined.
    using arg_call_types = std::tuple<typename vectorize_arg<Args>::call_type...>;
    template <size_t Index> using param_n_t = typename std::tuple_element<Index, arg_call_types>::type;
    // Runs a vectorized function given an arguments tuple and three index sequences:
    //     - Index is the full set of 0 ... (N-1) argument indices;
    //     - VIndex is the subset of argument indices with vectorized parameters, letting us access
    //       vectorized arguments (anything not in this sequence is passed through)
    //     - BIndex is an incremental sequence (beginning at 0) of the same size as VIndex, so that
    //       we can store vectorized buffer_infos in an array (argument VIndex has its buffer at
    //       index BIndex in the array).
    template <size_t... Index, size_t... VIndex, size_t... BIndex> object run(
            typename vectorize_arg<Args>::type &...args,
            index_sequence<Index...> i_seq, index_sequence<VIndex...> vi_seq, index_sequence<BIndex...> bi_seq) {
        // Pointers to values the function was called with; the vectorized ones set here will start
        // out as array_t<T> pointers, but they will be changed to T pointers before we call the
        // wrapped function. Non-vectorized pointers are left as-is.
        std::array<void *, N> params{{ &args... }};

        // The array of `buffer_info`s of vectorized arguments:
        std::array<buffer_info, NVectorized> buffers{{ reinterpret_cast<array *>(params[VIndex])->request()... }};
        /* Determine the dimension parameters of the output array */
        ssize_t nd = 0;
        std::vector<ssize_t> shape(0);
        auto trivial = broadcast(buffers, nd, shape);
        size_t ndim = (size_t) nd;

        size_t size = std::accumulate(shape.begin(), shape.end(), (size_t) 1, std::multiplies<size_t>());

        // If all arguments are 0-dimension arrays (i.e. single values) return a plain value (i.e.
        // not wrapped in an array).
        if (size == 1 && ndim == 0) {
            PYBIND11_EXPAND_SIDE_EFFECTS(params[VIndex] = buffers[BIndex].ptr);
            return cast(f(*reinterpret_cast<param_n_t<Index> *>(params[Index])...));
        }

        array_t<Return> result;
        if (trivial == broadcast_trivial::f_trivial) result = array_t<Return, array::f_style>(shape);
        else result = array_t<Return>(shape);

        if (size == 0) return result;

        /* Call the function */
        if (trivial == broadcast_trivial::non_trivial)
            apply_broadcast(buffers, params, result, i_seq, vi_seq, bi_seq);
        else
            apply_trivial(buffers, params, result.mutable_data(), size, i_seq, vi_seq, bi_seq);

        return result;
    }
    template <size_t... Index, size_t... VIndex, size_t... BIndex>
    void apply_trivial(std::array<buffer_info, NVectorized> &buffers,
                       std::array<void *, N> &params,
                       Return *out,
                       size_t size,
                       index_sequence<Index...>, index_sequence<VIndex...>, index_sequence<BIndex...>) {

        // Initialize an array of mutable byte references and sizes with references set to the
        // appropriate pointer in `params`; as we iterate, we'll increment each pointer by its size
        // (except for singletons, which get an increment of 0).
        std::array<std::pair<unsigned char *&, const size_t>, NVectorized> vecparams{{
            std::pair<unsigned char *&, const size_t>(
                    reinterpret_cast<unsigned char *&>(params[VIndex] = buffers[BIndex].ptr),
                    buffers[BIndex].size == 1 ? 0 : sizeof(param_n_t<VIndex>)
            )...
        }};

        for (size_t i = 0; i < size; ++i) {
            out[i] = f(*reinterpret_cast<param_n_t<Index> *>(params[Index])...);
            for (auto &x : vecparams) x.first += x.second;
        }
    }
    template <size_t... Index, size_t... VIndex, size_t... BIndex>
    void apply_broadcast(std::array<buffer_info, NVectorized> &buffers,
                         std::array<void *, N> &params,
                         array_t<Return> &output_array,
                         index_sequence<Index...>, index_sequence<VIndex...>, index_sequence<BIndex...>) {

        buffer_info output = output_array.request();
        multi_array_iterator<NVectorized> input_iter(buffers, output.shape);

        for (array_iterator<Return> iter = array_begin<Return>(output), end = array_end<Return>(output);
             iter != end;
             ++iter, ++input_iter) {
            PYBIND11_EXPAND_SIDE_EFFECTS((
                params[VIndex] = input_iter.template data<BIndex>()
            ));
            *iter = f(*reinterpret_cast<param_n_t<Index> *>(std::get<Index>(params))...);
        }
    }
};
template <typename Func, typename Return, typename... Args>
vectorize_helper<Func, Return, Args...>
vectorize_extractor(const Func &f, Return (*) (Args ...)) {
    return detail::vectorize_helper<Func, Return, Args...>(f);
}

template <typename T, int Flags> struct handle_type_name<array_t<T, Flags>> {
    static constexpr auto name = _("numpy.ndarray[") + npy_format_descriptor<T>::name + _("]");
};
NAMESPACE_END(detail)
// Vanilla pointer vectorizer:
template <typename Return, typename... Args>
detail::vectorize_helper<Return (*)(Args...), Return, Args...>
vectorize(Return (*f) (Args ...)) {
    return detail::vectorize_helper<Return (*)(Args...), Return, Args...>(f);
}
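// Usage sketch (illustrative; `my_func` and the module are hypothetical, and the conventional
// `namespace py = pybind11;` alias is assumed):
//     double my_func(int x, float y, double z) { return (double) x * y * z; }
//     PYBIND11_MODULE(example, m) {
//         m.def("vectorized_func", py::vectorize(my_func));
//     }
// The bound function then accepts NumPy arrays (or scalars) for x, y and z, broadcasts them
// against each other, and returns an array of results.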
// Lambda vectorizer:
template <typename Func, detail::enable_if_t<detail::is_lambda<Func>::value, int> = 0>
auto vectorize(Func &&f) -> decltype(
        detail::vectorize_extractor(std::forward<Func>(f), (detail::function_signature_t<Func> *) nullptr)) {
    return detail::vectorize_extractor(std::forward<Func>(f), (detail::function_signature_t<Func> *) nullptr);
}
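// The lambda overload is used the same way (illustrative sketch):
//     m.def("vectorized_square", py::vectorize([](double x) { return x * x; }));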
// Vectorize a class method (non-const):
template <typename Return, typename Class, typename... Args,
          typename Helper = detail::vectorize_helper<decltype(std::mem_fn(std::declval<Return (Class::*)(Args...)>())), Return, Class *, Args...>>
Helper vectorize(Return (Class::*f)(Args...)) {
    return Helper(std::mem_fn(f));
}

// Vectorize a class method (const):
template <typename Return, typename Class, typename... Args,
          typename Helper = detail::vectorize_helper<decltype(std::mem_fn(std::declval<Return (Class::*)(Args...) const>())), Return, const Class *, Args...>>
Helper vectorize(Return (Class::*f)(Args...) const) {
    return Helper(std::mem_fn(f));
}
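// Usage sketch for the method overloads (illustrative; `MyClass` and `scale` are hypothetical):
// the resulting callable takes the class instance as its first, non-vectorized argument, so it
// can be bound like a method:
//     py::class_<MyClass>(m, "MyClass")
//         .def(py::init<>())
//         .def("scale_all", py::vectorize(&MyClass::scale));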
NAMESPACE_END(PYBIND11_NAMESPACE)

#if defined(_MSC_VER)
#pragma warning(pop)
#endif