/*
Copyright (c) 2016-2021 Timur Gafarov

Boost Software License - Version 1.0 - August 17th, 2003

Permission is hereby granted, free of charge, to any person or organization
obtaining a copy of the software and accompanying documentation covered by
this license (the "Software") to use, reproduce, display, distribute,
execute, and transmit the Software, and to prepare derivative works of the
Software, and to permit third-parties to whom the Software is furnished to
do so, all subject to the following:

The copyright notices in the Software and this entire statement, including
the above license grant, this restriction and the following disclaimer,
must be included in all copies of the Software, in whole or in part, and
all derivative works of the Software, unless such copies or derivative
works are solely in the form of machine-executable object code generated by
a source language processor.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*/

/**
 * N-dimensional numeric data structure
 *
 * Copyright: Timur Gafarov 2016-2021.
 * License: $(LINK2 boost.org/LICENSE_1_0.txt, Boost License 1.0).
 * Authors: Timur Gafarov
 */
module dlib.math.tensor;

import std.traits;
import std.math;
import std.conv;
import std.range;
import std.format;

import dlib.core.tuple;
import dlib.core.compound;
import dlib.core.memory;

T zero(T)() if (isNumeric!T)
{
    return T(0);
}

size_t calcLen(T...)(T n)
{
    size_t len = 1;
    foreach(s; n)
        len *= s;
    return len;
}

template NTypeTuple(T, int n)
{
    static if (n <= 0)
        alias NTypeTuple = Tuple!();
    else
        alias NTypeTuple = Tuple!(NTypeTuple!(T, n-1), T);
}

enum MaxStaticTensorSize = double.sizeof * 16; // fit 4x4 matrix of doubles
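// A minimal sketch (added for illustration) of how calcLen and
// MaxStaticTensorSize interact: calcLen multiplies the given sizes together,
// and the resulting byte size selects static or heap storage in the Tensor
// template below.
unittest
{
    static assert(calcLen(3) == 3);      // 3-vector: 3 elements
    static assert(calcLen(4, 4) == 16);  // 4x4 matrix: 16 elements

    // 16 doubles occupy exactly MaxStaticTensorSize bytes, so a 4x4 matrix
    // of doubles is still stored statically; anything larger goes to the heap.
    static assert(calcLen(4, 4) * double.sizeof <= MaxStaticTensorSize);
}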
/**
Generic multi-dimensional array template

Description:
This template mainly serves as a base for creating various
more specialized algebraic objects via encapsulation.
Think of Tensor as a backend for e.g. Vector and Matrix.

T - element type, usually numeric (float or double)

dim - number of dimensions (tensor order):
    0 - scalar,
    1 - vector,
    2 - matrix,
    3 - 3D array,
    (higher dimensions are also possible).

sizes - tuple defining sizes for each dimension:
    3 - 3-vector,
    4,4 - 4x4 matrix,
    etc.

Data storage (stack or heap) is selected statically: if the given sizes
imply a data size larger than MaxStaticTensorSize, the data is allocated
on the heap (as a dynamic array); otherwise it is allocated on the stack
(as a static array).
*/
template Tensor(T, size_t dim, sizes...)
{
    // TODO:
    // - External storage
    // - Component-wise addition, subtraction

    struct Tensor
    {
        private enum size_t _dataLen = calcLen(sizes);

        alias ElementType = T;
        enum size_t dimensions = dim;
        enum size_t order = dim;
        alias Sizes = sizes;
        enum bool isTensor = true;
        enum bool isScalar = (order == 0 && _dataLen == 1);
        enum bool isVector = (order == 1);
        enum bool isMatrix = (order == 2);
        enum bool dynamic = (_dataLen * T.sizeof) > MaxStaticTensorSize;

        static assert(order == sizes.length,
            "Illegal size for Tensor");

        static if (order > 0)
        {
            static assert(sizes.length,
                "Tensor of non-zero order must have at least one size");
        }

        static if (isVector)
        {
            static assert(sizes.length == 1,
                "Illegal size for 1st-order Tensor");
        }

        static if (isMatrix)
        {
            static assert(sizes.length == 2,
                "Illegal size for 2nd-order Tensor");

            enum size_t rows = sizes[0];
            enum size_t cols = sizes[1];

            enum bool isSquareMatrix = (rows == cols);

            static if (isSquareMatrix)
            {
                enum size = sizes[0];
            }
        }
        else
        {
            enum bool isSquareMatrix = false;

            static if (sizes.length > 0)
            {
                enum size = sizes[0];
            }
        }

        /**
         * Single element constructor
         */
        this(T initVal)
        {
            static if (dynamic)
            {
                allocate();
            }

            foreach(ref v; data)
                v = initVal;
        }

        /**
         * Tensor constructor
         */
        this(Tensor!(T, order, sizes) t)
        {
            static if (dynamic)
            {
                allocate();
            }

            foreach(i, v; t.arrayof)
            {
                arrayof[i] = v;
            }
        }

        /**
         * Tuple constructor
         */
        this(F...)(F components) if (F.length > 1)
        {
            static if (dynamic)
            {
                allocate();
            }

            foreach(i, v; components)
            {
                static if (i < arrayof.length)
                    arrayof[i] = cast(T)v;
            }
        }

        static Tensor!(T, order, sizes) init()
        {
            Tensor!(T, order, sizes) res;
            static if (dynamic)
            {
                res.allocate();
            }
            return res;
        }

        static Tensor!(T, order, sizes) zero()
        {
            Tensor!(T, order, sizes) res;
            static if (dynamic)
            {
                res.allocate();
            }
            foreach(ref v; res.data)
                v = .zero!T();
            return res;
        }

        /**
         * T = Tensor[index]
         */
        auto ref T opIndex(this X)(size_t index)
        in
        {
            assert((0 <= index) && (index < _dataLen),
                "Tensor.opIndex: array index out of bounds");
        }
        do
        {
            return arrayof[index];
        }

        /**
         * Tensor[index] = T
         */
        void opIndexAssign(T n, size_t index)
        in
        {
            assert(index < _dataLen,
                "Tensor.opIndexAssign: array index out of bounds");
        }
        do
        {
            arrayof[index] = n;
        }

        /**
         * T = Tensor[i, j, ...]
         */
        T opIndex(I...)(in I indices) const if (I.length == sizes.length)
        {
            size_t index = 0;
            size_t m = 1;
            foreach(i, ind; indices)
            {
                index += ind * m;
                m *= sizes[i];
            }
            return arrayof[index];
        }
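        // Layout note (added for clarity, derived from the index computation
        // above): a multi-index (i, j, ...) is flattened with the first index
        // varying fastest, i.e. index = i + j*sizes[0] + k*sizes[0]*sizes[1] + ...
        // For a 4x4 matrix, element [1, 2] is stored at data[1 + 2*4] = data[9].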
        /**
         * Tensor[i, j, ...] = T
         */
        T opIndexAssign(I...)(in T t, in I indices) if (I.length == sizes.length)
        {
            size_t index = 0;
            size_t m = 1;
            foreach(i, ind; indices)
            {
                index += ind * m;
                m *= sizes[i];
            }
            return (arrayof[index] = t);
        }

        /**
         * Tensor = Tensor
         */
        void opAssign(Tensor!(T, order, sizes) t)
        {
            static if (dynamic)
            {
                allocate();
            }

            foreach(i, v; t.arrayof)
            {
                arrayof[i] = v;
            }
        }

        alias Indices = NTypeTuple!(size_t, order);

        int opApply(scope int delegate(ref T v, Indices indices) dg)
        {
            int result = 0;
            Compound!(Indices) ind;
            size_t index = 0;

            while(index < data.length)
            {
                result = dg(data[index], ind.tuple);
                if (result)
                    break;

                ind[0]++;

                foreach(i; RangeTuple!(0, order))
                {
                    if (ind[i] == sizes[i])
                    {
                        ind[i] = 0;
                        static if (i < order-1)
                        {
                            ind[i+1]++;
                        }
                    }
                }

                index++;
            }

            return result;
        }

        @property string toString() const
        {
            static if (isScalar)
            {
                return x.to!string;
            }
            else
            {
                auto writer = appender!string();
                formattedWrite(writer, "%s", arrayof);
                return writer.data;
            }
        }

        @property size_t length()
        {
            return data.length;
        }

        @property bool initialized()
        {
            return (data.length > 0);
        }

        static if (isVector)
        {
            private static bool valid(string s)
            {
                if (s.length < 2)
                    return false;

                foreach(c; s)
                {
                    switch(c)
                    {
                        case 'w', 'a', 'q':
                            if (size < 4) return false;
                            else break;
                        case 'z', 'b', 'p':
                            if (size < 3) return false;
                            else break;
                        case 'y', 'g', 't':
                            if (size < 2) return false;
                            else break;
                        case 'x', 'r', 's':
                            if (size < 1) return false;
                            else break;
                        default:
                            return false;
                    }
                }
                return true;
            }

            static if (size < 5)
            {
                /**
                 * Symbolic element access for vector
                 */
                private static string vecElements(string[4] letters) @property
                {
                    string res;
                    foreach (i; 0..size)
                    {
                        res ~= "T " ~ letters[i] ~ "; ";
                    }
                    return res;
                }
            }

            /**
             * Swizzling
             */
            template opDispatch(string s) if (valid(s))
            {
                static if (s.length <= 4)
                {
                    @property auto ref opDispatch(this X)()
                    {
                        auto extend(string s)
                        {
                            while (s.length < 4)
                                s ~= s[$-1];
                            return s;
                        }

                        enum p = extend(s);
                        enum i = (char c) => ['x':0, 'y':1, 'z':2, 'w':3,
                                              'r':0, 'g':1, 'b':2, 'a':3,
                                              's':0, 't':1, 'p':2, 'q':3][c];
                        enum i0 = i(p[0]),
                             i1 = i(p[1]),
                             i2 = i(p[2]),
                             i3 = i(p[3]);

                        static if (s.length == 4)
                            return Tensor!(T,1,4)(arrayof[i0], arrayof[i1], arrayof[i2], arrayof[i3]);
                        else static if (s.length == 3)
                            return Tensor!(T,1,3)(arrayof[i0], arrayof[i1], arrayof[i2]);
                        else static if (s.length == 2)
                            return Tensor!(T,1,2)(arrayof[i0], arrayof[i1]);
                    }
                }
            }
        }

        static if (dynamic)
        {
            T[] data;

            private void allocate()
            {
                if (data.length == 0)
                    data = New!(T[])(_dataLen);
            }

            void free()
            {
                if (data.length)
                    Delete(data);
            }
        }
        else
        {
            union
            {
                T[_dataLen] data;

                static if (isScalar)
                {
                    T x;
                }

                static if (isVector)
                {
                    static if (size < 5)
                    {
                        struct { mixin(vecElements(["x", "y", "z", "w"])); }
                        struct { mixin(vecElements(["r", "g", "b", "a"])); }
                        struct { mixin(vecElements(["s", "t", "p", "q"])); }
                    }
                }
            }

            static if (isScalar)
            {
                alias x this;
            }
        }

        alias arrayof = data;
    }
}
/**
 * Tensor product
 *
 * Description:
 * The tensor product of two tensors of order N
 * with sizes S1 and S2 gives a tensor of order 2N
 * whose sizes are the concatenation of S2 and S1.
 */
auto tensorProduct(T1, T2)(T1 t1, T2 t2)
{
    // TODO: ensure T1, T2 are Tensors
    // TODO: if T1 and T2 are scalars, use ordinary multiplication
    // TODO: if T1 and T2 are vectors, use optimized version

    static assert(T1.dimensions == T2.dimensions);

    alias T = T1.ElementType;
    enum order = T1.dimensions + T2.dimensions;
    alias sizes = Tuple!(T2.Sizes, T1.Sizes);
    alias TensorType = Tensor!(T, order, sizes);

    TensorType t;
    static if (TensorType.dynamic)
    {
        t = TensorType.init();
    }

    Compound!(TensorType.Indices) ind;
    size_t index = 0;

    while(index < t.data.length)
    {
        t.data[index] =
            t2[ind.tuple[0..$/2]] *
            t1[ind.tuple[$/2..$]];

        ind[0]++;

        foreach(i; RangeTuple!(0, order))
        {
            if (ind[i] == sizes[i])
            {
                ind[i] = 0;
                static if (i < order-1)
                {
                    ind[i+1]++;
                }
            }
        }

        index++;
    }

    return t;
}
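// Usage sketch (added for illustration): the tensor product of two vectors
// is their outer product. With the size ordering produced above, the result
// of tensorProduct(a, b) satisfies result[i, j] == b[i] * a[j].
unittest
{
    auto a = Tensor!(float, 1, 2)(1.0f, 2.0f);
    auto b = Tensor!(float, 1, 3)(3.0f, 4.0f, 5.0f);

    auto p = tensorProduct(a, b);
    static assert(typeof(p).order == 2);

    assert(p[0, 0] == 3.0f);  // b[0] * a[0]
    assert(p[2, 1] == 10.0f); // b[2] * a[1]
}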