//
// This file is auto-generated. Please don't modify it!
//
package org.opencv.photo;

import java.util.ArrayList;
import java.util.List;
import org.opencv.core.Mat;
import org.opencv.core.MatOfFloat;
import org.opencv.core.Point;
import org.opencv.photo.AlignMTB;
import org.opencv.photo.CalibrateDebevec;
import org.opencv.photo.CalibrateRobertson;
import org.opencv.photo.MergeDebevec;
import org.opencv.photo.MergeMertens;
import org.opencv.photo.MergeRobertson;
import org.opencv.photo.Tonemap;
import org.opencv.photo.TonemapDrago;
import org.opencv.photo.TonemapMantiuk;
import org.opencv.photo.TonemapReinhard;
import org.opencv.utils.Converters;

// C++: class Photo

public class Photo {

    // C++: enum <unnamed>
    // Flag constants mirrored from the C++ photo module.
    public static final int
            INPAINT_NS = 0,           // inpaint() flag: Navier-Stokes based method (see inpaint docs)
            INPAINT_TELEA = 1,        // inpaint() flag: method by A. Telea (see inpaint docs)
            LDR_SIZE = 256,           // number of tonal levels in an 8-bit LDR image
            NORMAL_CLONE = 1,         // seamless-cloning mode (used by the cloning functions)
            MIXED_CLONE = 2,          // seamless-cloning mode: mixed gradients
            MONOCHROME_TRANSFER = 3,  // seamless-cloning mode: monochrome transfer
            RECURS_FILTER = 1,        // edge-preserving filter type: recursive filter
            NORMCONV_FILTER = 2;      // edge-preserving filter type: normalized convolution


    //
    // C++:  void cv::inpaint(Mat src, Mat inpaintMask, Mat& dst, double inpaintRadius, int flags)
    //

    /**
     * Restores the selected region in an image using the region neighborhood.
     *
     * @param src Input 8-bit, 16-bit unsigned or 32-bit float 1-channel or 8-bit 3-channel image.
     * @param inpaintMask Inpainting mask, 8-bit 1-channel image. Non-zero pixels indicate the area that
     * needs to be inpainted.
     * @param dst Output image with the same size and type as src .
     * @param inpaintRadius Radius of a circular neighborhood of each point inpainted that is considered
     * by the algorithm.
     * @param flags Inpainting method that could be cv::INPAINT_NS or cv::INPAINT_TELEA
     *
     * The function reconstructs the selected image area from the pixel near the area boundary. The
     * function may be used to remove dust and scratches from a scanned photo, or to remove undesirable
     * objects from still images or video. See &lt;http://en.wikipedia.org/wiki/Inpainting&gt; for more details.
     *
     * <b>Note:</b>
     * <ul>
     *   <li>
     *       An example using the inpainting technique can be found at
     *         opencv_source_code/samples/cpp/inpaint.cpp
     *   </li>
     *   <li>
     *       (Python) An example using the inpainting technique can be found at
     *         opencv_source_code/samples/python/inpaint.py
     *   </li>
     * </ul>
     */
    public static void inpaint(Mat src, Mat inpaintMask, Mat dst, double inpaintRadius, int flags) {
        // Thin JNI wrapper: forwards the native handles to the C++ implementation.
        inpaint_0(src.nativeObj, inpaintMask.nativeObj, dst.nativeObj, inpaintRadius, flags);
    }


    //
    // C++:  void cv::fastNlMeansDenoising(Mat src, Mat& dst, float h = 3, int templateWindowSize = 7, int searchWindowSize = 21)
    //

    /**
     * Perform image denoising using Non-local Means Denoising algorithm
     * &lt;http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/&gt; with several computational
     * optimizations. Noise expected to be a gaussian white noise
     *
     * @param src Input 8-bit 1-channel, 2-channel, 3-channel or 4-channel image.
     * @param dst Output image with the same size and type as src .
     * @param templateWindowSize Size in pixels of the template patch that is used to compute weights.
     * Should be odd. Recommended value 7 pixels
     * @param searchWindowSize Size in pixels of the window that is used to compute weighted average for
     * given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater
     * denoising time. Recommended value 21 pixels
     * @param h Parameter regulating filter strength. Big h value perfectly removes noise but also
     * removes image details, smaller h value preserves details but also preserves some noise
     *
     * This function expected to be applied to grayscale images. For colored images look at
     * fastNlMeansDenoisingColored. Advanced usage of this functions can be manual denoising of colored
     * image in different colorspaces. Such approach is used in fastNlMeansDenoisingColored by converting
     * image to CIELAB colorspace and then separately denoise L and AB components with different h
     * parameter.
     */
    public static void fastNlMeansDenoising(Mat src, Mat dst, float h, int templateWindowSize, int searchWindowSize) {
        // Thin JNI wrapper: forwards the native handles to the C++ implementation.
        fastNlMeansDenoising_0(src.nativeObj, dst.nativeObj, h, templateWindowSize, searchWindowSize);
    }

    /**
     * Perform image denoising using Non-local Means Denoising algorithm
     * &lt;http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/&gt; with several computational
     * optimizations. Noise expected to be a gaussian white noise
     *
     * Overload that uses the default searchWindowSize (21 pixels).
     *
     * @param src Input 8-bit 1-channel, 2-channel, 3-channel or 4-channel image.
     * @param dst Output image with the same size and type as src .
     * @param templateWindowSize Size in pixels of the template patch that is used to compute weights.
     * Should be odd. Recommended value 7 pixels
     * @param h Parameter regulating filter strength. Big h value perfectly removes noise but also
     * removes image details, smaller h value preserves details but also preserves some noise
     *
     * This function expected to be applied to grayscale images. For colored images look at
     * fastNlMeansDenoisingColored. Advanced usage of this functions can be manual denoising of colored
     * image in different colorspaces. Such approach is used in fastNlMeansDenoisingColored by converting
     * image to CIELAB colorspace and then separately denoise L and AB components with different h
     * parameter.
     */
    public static void fastNlMeansDenoising(Mat src, Mat dst, float h, int templateWindowSize) {
        fastNlMeansDenoising_1(src.nativeObj, dst.nativeObj, h, templateWindowSize);
    }

    /**
     * Perform image denoising using Non-local Means Denoising algorithm
     * &lt;http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/&gt; with several computational
     * optimizations. Noise expected to be a gaussian white noise
     *
     * Overload that uses the default templateWindowSize (7 pixels) and
     * searchWindowSize (21 pixels).
     *
     * @param src Input 8-bit 1-channel, 2-channel, 3-channel or 4-channel image.
     * @param dst Output image with the same size and type as src .
     * @param h Parameter regulating filter strength. Big h value perfectly removes noise but also
     * removes image details, smaller h value preserves details but also preserves some noise
     *
     * This function expected to be applied to grayscale images. For colored images look at
     * fastNlMeansDenoisingColored. Advanced usage of this functions can be manual denoising of colored
     * image in different colorspaces. Such approach is used in fastNlMeansDenoisingColored by converting
     * image to CIELAB colorspace and then separately denoise L and AB components with different h
     * parameter.
     */
    public static void fastNlMeansDenoising(Mat src, Mat dst, float h) {
        fastNlMeansDenoising_2(src.nativeObj, dst.nativeObj, h);
    }

    /**
     * Perform image denoising using Non-local Means Denoising algorithm
     * &lt;http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/&gt; with several computational
     * optimizations. Noise expected to be a gaussian white noise
     *
     * Overload that uses the default filter strength (h = 3),
     * templateWindowSize (7 pixels) and searchWindowSize (21 pixels).
     *
     * @param src Input 8-bit 1-channel, 2-channel, 3-channel or 4-channel image.
     * @param dst Output image with the same size and type as src .
     *
     * This function expected to be applied to grayscale images. For colored images look at
     * fastNlMeansDenoisingColored. Advanced usage of this functions can be manual denoising of colored
     * image in different colorspaces. Such approach is used in fastNlMeansDenoisingColored by converting
     * image to CIELAB colorspace and then separately denoise L and AB components with different h
     * parameter.
     */
    public static void fastNlMeansDenoising(Mat src, Mat dst) {
        fastNlMeansDenoising_3(src.nativeObj, dst.nativeObj);
    }


    //
    // C++:  void cv::fastNlMeansDenoising(Mat src, Mat& dst, vector_float h, int templateWindowSize = 7, int searchWindowSize = 21, int normType = NORM_L2)
    //

    /**
     * Perform image denoising using Non-local Means Denoising algorithm
     * &lt;http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/&gt; with several computational
     * optimizations. Noise expected to be a gaussian white noise
     *
     * Variant taking a per-channel array of filter strengths and a selectable norm.
     *
     * @param src Input 8-bit or 16-bit (only with NORM_L1) 1-channel,
     * 2-channel, 3-channel or 4-channel image.
     * @param dst Output image with the same size and type as src .
     * @param h Array of parameters regulating filter strength, either one
     * parameter applied to all channels or one per channel in dst. Big h value
     * perfectly removes noise but also removes image details, smaller h
     * value preserves details but also preserves some noise
     * @param templateWindowSize Size in pixels of the template patch that is used to compute weights.
     * Should be odd. Recommended value 7 pixels
     * @param searchWindowSize Size in pixels of the window that is used to compute weighted average for
     * given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater
     * denoising time. Recommended value 21 pixels
     * @param normType Type of norm used for weight calculation. Can be either NORM_L2 or NORM_L1
     *
     * This function expected to be applied to grayscale images. For colored images look at
     * fastNlMeansDenoisingColored. Advanced usage of this functions can be manual denoising of colored
     * image in different colorspaces. Such approach is used in fastNlMeansDenoisingColored by converting
     * image to CIELAB colorspace and then separately denoise L and AB components with different h
     * parameter.
     */
    public static void fastNlMeansDenoising(Mat src, Mat dst, MatOfFloat h, int templateWindowSize, int searchWindowSize, int normType) {
        // MatOfFloat is a Mat, so its native handle can be passed directly.
        fastNlMeansDenoising_4(src.nativeObj, dst.nativeObj, h.nativeObj, templateWindowSize, searchWindowSize, normType);
    }

    /**
     * Perform image denoising using Non-local Means Denoising algorithm
     * &lt;http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/&gt; with several computational
     * optimizations. Noise expected to be a gaussian white noise
     *
     * Per-channel-strength overload using the default norm type (NORM_L2).
     *
     * @param src Input 8-bit or 16-bit (only with NORM_L1) 1-channel,
     * 2-channel, 3-channel or 4-channel image.
     * @param dst Output image with the same size and type as src .
     * @param h Array of parameters regulating filter strength, either one
     * parameter applied to all channels or one per channel in dst. Big h value
     * perfectly removes noise but also removes image details, smaller h
     * value preserves details but also preserves some noise
     * @param templateWindowSize Size in pixels of the template patch that is used to compute weights.
     * Should be odd. Recommended value 7 pixels
     * @param searchWindowSize Size in pixels of the window that is used to compute weighted average for
     * given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater
     * denoising time. Recommended value 21 pixels
     *
     * This function expected to be applied to grayscale images. For colored images look at
     * fastNlMeansDenoisingColored. Advanced usage of this functions can be manual denoising of colored
     * image in different colorspaces. Such approach is used in fastNlMeansDenoisingColored by converting
     * image to CIELAB colorspace and then separately denoise L and AB components with different h
     * parameter.
     */
    public static void fastNlMeansDenoising(Mat src, Mat dst, MatOfFloat h, int templateWindowSize, int searchWindowSize) {
        // MatOfFloat is a Mat, so its native handle can be passed directly.
        fastNlMeansDenoising_5(src.nativeObj, dst.nativeObj, h.nativeObj, templateWindowSize, searchWindowSize);
    }

    /**
     * Perform image denoising using Non-local Means Denoising algorithm
     * &lt;http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/&gt; with several computational
     * optimizations. Noise expected to be a gaussian white noise
     *
     * Per-channel-strength overload using the default searchWindowSize
     * (21 pixels) and norm type (NORM_L2).
     *
     * @param src Input 8-bit or 16-bit (only with NORM_L1) 1-channel,
     * 2-channel, 3-channel or 4-channel image.
     * @param dst Output image with the same size and type as src .
     * @param h Array of parameters regulating filter strength, either one
     * parameter applied to all channels or one per channel in dst. Big h value
     * perfectly removes noise but also removes image details, smaller h
     * value preserves details but also preserves some noise
     * @param templateWindowSize Size in pixels of the template patch that is used to compute weights.
     * Should be odd. Recommended value 7 pixels
     *
     * This function expected to be applied to grayscale images. For colored images look at
     * fastNlMeansDenoisingColored. Advanced usage of this functions can be manual denoising of colored
     * image in different colorspaces. Such approach is used in fastNlMeansDenoisingColored by converting
     * image to CIELAB colorspace and then separately denoise L and AB components with different h
     * parameter.
     */
    public static void fastNlMeansDenoising(Mat src, Mat dst, MatOfFloat h, int templateWindowSize) {
        // MatOfFloat is a Mat, so its native handle can be passed directly.
        fastNlMeansDenoising_6(src.nativeObj, dst.nativeObj, h.nativeObj, templateWindowSize);
    }

    /**
     * Perform image denoising using Non-local Means Denoising algorithm
     * &lt;http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/&gt; with several computational
     * optimizations. Noise expected to be a gaussian white noise
     *
     * Per-channel-strength overload using the default templateWindowSize
     * (7 pixels), searchWindowSize (21 pixels) and norm type (NORM_L2).
     *
     * @param src Input 8-bit or 16-bit (only with NORM_L1) 1-channel,
     * 2-channel, 3-channel or 4-channel image.
     * @param dst Output image with the same size and type as src .
     * @param h Array of parameters regulating filter strength, either one
     * parameter applied to all channels or one per channel in dst. Big h value
     * perfectly removes noise but also removes image details, smaller h
     * value preserves details but also preserves some noise
     *
     * This function expected to be applied to grayscale images. For colored images look at
     * fastNlMeansDenoisingColored. Advanced usage of this functions can be manual denoising of colored
     * image in different colorspaces. Such approach is used in fastNlMeansDenoisingColored by converting
     * image to CIELAB colorspace and then separately denoise L and AB components with different h
     * parameter.
     */
    public static void fastNlMeansDenoising(Mat src, Mat dst, MatOfFloat h) {
        // MatOfFloat is a Mat, so its native handle can be passed directly.
        fastNlMeansDenoising_7(src.nativeObj, dst.nativeObj, h.nativeObj);
    }


    //
    // C++:  void cv::fastNlMeansDenoisingColored(Mat src, Mat& dst, float h = 3, float hColor = 3, int templateWindowSize = 7, int searchWindowSize = 21)
    //

    /**
     * Modification of fastNlMeansDenoising function for colored images
     *
     * @param src Input 8-bit 3-channel image.
     * @param dst Output image with the same size and type as src .
     * @param templateWindowSize Size in pixels of the template patch that is used to compute weights.
     * Should be odd. Recommended value 7 pixels
     * @param searchWindowSize Size in pixels of the window that is used to compute weighted average for
     * given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater
     * denoising time. Recommended value 21 pixels
     * @param h Parameter regulating filter strength for luminance component. Bigger h value perfectly
     * removes noise but also removes image details, smaller h value preserves details but also preserves
     * some noise
     * @param hColor The same as h but for color components. For most images value equals 10
     * will be enough to remove colored noise and do not distort colors
     *
     * The function converts image to CIELAB colorspace and then separately denoise L and AB components
     * with given h parameters using fastNlMeansDenoising function.
     */
    public static void fastNlMeansDenoisingColored(Mat src, Mat dst, float h, float hColor, int templateWindowSize, int searchWindowSize) {
        // Thin JNI wrapper: forwards the native handles to the C++ implementation.
        fastNlMeansDenoisingColored_0(src.nativeObj, dst.nativeObj, h, hColor, templateWindowSize, searchWindowSize);
    }

    /**
     * Modification of fastNlMeansDenoising function for colored images
     *
     * Overload that uses the default searchWindowSize (21 pixels).
     *
     * @param src Input 8-bit 3-channel image.
     * @param dst Output image with the same size and type as src .
     * @param templateWindowSize Size in pixels of the template patch that is used to compute weights.
     * Should be odd. Recommended value 7 pixels
     * @param h Parameter regulating filter strength for luminance component. Bigger h value perfectly
     * removes noise but also removes image details, smaller h value preserves details but also preserves
     * some noise
     * @param hColor The same as h but for color components. For most images value equals 10
     * will be enough to remove colored noise and do not distort colors
     *
     * The function converts image to CIELAB colorspace and then separately denoise L and AB components
     * with given h parameters using fastNlMeansDenoising function.
     */
    public static void fastNlMeansDenoisingColored(Mat src, Mat dst, float h, float hColor, int templateWindowSize) {
        fastNlMeansDenoisingColored_1(src.nativeObj, dst.nativeObj, h, hColor, templateWindowSize);
    }

    /**
     * Modification of fastNlMeansDenoising function for colored images
     *
     * Overload that uses the default templateWindowSize (7 pixels) and
     * searchWindowSize (21 pixels).
     *
     * @param src Input 8-bit 3-channel image.
     * @param dst Output image with the same size and type as src .
     * @param h Parameter regulating filter strength for luminance component. Bigger h value perfectly
     * removes noise but also removes image details, smaller h value preserves details but also preserves
     * some noise
     * @param hColor The same as h but for color components. For most images value equals 10
     * will be enough to remove colored noise and do not distort colors
     *
     * The function converts image to CIELAB colorspace and then separately denoise L and AB components
     * with given h parameters using fastNlMeansDenoising function.
     */
    public static void fastNlMeansDenoisingColored(Mat src, Mat dst, float h, float hColor) {
        fastNlMeansDenoisingColored_2(src.nativeObj, dst.nativeObj, h, hColor);
    }

    /**
     * Modification of fastNlMeansDenoising function for colored images
     *
     * Overload that uses the default hColor (3), templateWindowSize (7 pixels)
     * and searchWindowSize (21 pixels).
     *
     * @param src Input 8-bit 3-channel image.
     * @param dst Output image with the same size and type as src .
     * @param h Parameter regulating filter strength for luminance component. Bigger h value perfectly
     * removes noise but also removes image details, smaller h value preserves details but also preserves
     * some noise
     *
     * The function converts image to CIELAB colorspace and then separately denoise L and AB components
     * with given h parameters using fastNlMeansDenoising function.
     */
    public static void fastNlMeansDenoisingColored(Mat src, Mat dst, float h) {
        fastNlMeansDenoisingColored_3(src.nativeObj, dst.nativeObj, h);
    }

    /**
     * Modification of fastNlMeansDenoising function for colored images
     *
     * Overload that uses all defaults: h (3), hColor (3),
     * templateWindowSize (7 pixels) and searchWindowSize (21 pixels).
     *
     * @param src Input 8-bit 3-channel image.
     * @param dst Output image with the same size and type as src .
     *
     * The function converts image to CIELAB colorspace and then separately denoise L and AB components
     * with given h parameters using fastNlMeansDenoising function.
     */
    public static void fastNlMeansDenoisingColored(Mat src, Mat dst) {
        fastNlMeansDenoisingColored_4(src.nativeObj, dst.nativeObj);
    }


    //
    // C++:  void cv::fastNlMeansDenoisingMulti(vector_Mat srcImgs, Mat& dst, int imgToDenoiseIndex, int temporalWindowSize, float h = 3, int templateWindowSize = 7, int searchWindowSize = 21)
    //

    /**
     * Modification of fastNlMeansDenoising function for images sequence where consecutive images have been
     * captured in small period of time. For example video. This version of the function is for grayscale
     * images or for manual manipulation with colorspaces. See CITE: Buades2005DenoisingIS for more details
     * (open access [here](https://static.aminer.org/pdf/PDF/000/317/196/spatio_temporal_wiener_filtering_of_image_sequences_using_a_parametric.pdf)).
     *
     * @param srcImgs Input 8-bit 1-channel, 2-channel, 3-channel or
     * 4-channel images sequence. All images should have the same type and
     * size.
     * @param imgToDenoiseIndex Target image to denoise index in srcImgs sequence
     * @param temporalWindowSize Number of surrounding images to use for target image denoising. Should
     * be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to
     * imgToDenoiseIndex + temporalWindowSize / 2 from srcImgs will be used to denoise
     * srcImgs[imgToDenoiseIndex] image.
     * @param dst Output image with the same size and type as srcImgs images.
     * @param templateWindowSize Size in pixels of the template patch that is used to compute weights.
     * Should be odd. Recommended value 7 pixels
     * @param searchWindowSize Size in pixels of the window that is used to compute weighted average for
     * given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater
     * denoising time. Recommended value 21 pixels
     * @param h Parameter regulating filter strength. Bigger h value
     * perfectly removes noise but also removes image details, smaller h
     * value preserves details but also preserves some noise
     */
    public static void fastNlMeansDenoisingMulti(List<Mat> srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize, float h, int templateWindowSize, int searchWindowSize) {
        // Pack the Java list of Mats into a single Mat of handles for the JNI call.
        Mat srcImgs_mat = Converters.vector_Mat_to_Mat(srcImgs);
        fastNlMeansDenoisingMulti_0(srcImgs_mat.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize, h, templateWindowSize, searchWindowSize);
    }

    /**
     * Modification of fastNlMeansDenoising function for image sequences where consecutive images have
     * been captured in a small period of time, for example video. This version of the function is for
     * grayscale images or for manual manipulation with colorspaces. See CITE: Buades2005DenoisingIS
     * for more details
     * (open access [here](https://static.aminer.org/pdf/PDF/000/317/196/spatio_temporal_wiener_filtering_of_image_sequences_using_a_parametric.pdf)).
     *
     * This overload uses the native default search window size (21 pixels).
     *
     * @param srcImgs Input 8-bit 1-channel, 2-channel, 3-channel or 4-channel images sequence. All
     * images should have the same type and size.
     * @param dst Output image with the same size and type as srcImgs images.
     * @param imgToDenoiseIndex Target image to denoise index in srcImgs sequence
     * @param temporalWindowSize Number of surrounding images to use for target image denoising. Should
     * be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to
     * imgToDenoiseIndex + temporalWindowSize / 2 from srcImgs will be used to denoise
     * srcImgs[imgToDenoiseIndex] image.
     * @param h Parameter regulating filter strength. A bigger h value removes noise more thoroughly
     * but also removes image details; a smaller h value preserves details but also preserves some noise.
     * @param templateWindowSize Size in pixels of the template patch that is used to compute weights.
     * Should be odd. Recommended value 7 pixels.
     */
    public static void fastNlMeansDenoisingMulti(List<Mat> srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize, float h, int templateWindowSize) {
        Mat srcMat = Converters.vector_Mat_to_Mat(srcImgs);
        fastNlMeansDenoisingMulti_1(srcMat.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize, h, templateWindowSize);
    }

    /**
     * Modification of fastNlMeansDenoising function for image sequences where consecutive images have
     * been captured in a small period of time, for example video. This version of the function is for
     * grayscale images or for manual manipulation with colorspaces. See CITE: Buades2005DenoisingIS
     * for more details
     * (open access [here](https://static.aminer.org/pdf/PDF/000/317/196/spatio_temporal_wiener_filtering_of_image_sequences_using_a_parametric.pdf)).
     *
     * This overload uses the native default template window size (7 pixels) and search window size
     * (21 pixels).
     *
     * @param srcImgs Input 8-bit 1-channel, 2-channel, 3-channel or 4-channel images sequence. All
     * images should have the same type and size.
     * @param dst Output image with the same size and type as srcImgs images.
     * @param imgToDenoiseIndex Target image to denoise index in srcImgs sequence
     * @param temporalWindowSize Number of surrounding images to use for target image denoising. Should
     * be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to
     * imgToDenoiseIndex + temporalWindowSize / 2 from srcImgs will be used to denoise
     * srcImgs[imgToDenoiseIndex] image.
     * @param h Parameter regulating filter strength. A bigger h value removes noise more thoroughly
     * but also removes image details; a smaller h value preserves details but also preserves some noise.
     */
    public static void fastNlMeansDenoisingMulti(List<Mat> srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize, float h) {
        Mat srcMat = Converters.vector_Mat_to_Mat(srcImgs);
        fastNlMeansDenoisingMulti_2(srcMat.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize, h);
    }

    /**
     * Modification of fastNlMeansDenoising function for image sequences where consecutive images have
     * been captured in a small period of time, for example video. This version of the function is for
     * grayscale images or for manual manipulation with colorspaces. See CITE: Buades2005DenoisingIS
     * for more details
     * (open access [here](https://static.aminer.org/pdf/PDF/000/317/196/spatio_temporal_wiener_filtering_of_image_sequences_using_a_parametric.pdf)).
     *
     * This overload uses the native defaults for filter strength (h = 3), template window size
     * (7 pixels) and search window size (21 pixels).
     *
     * @param srcImgs Input 8-bit 1-channel, 2-channel, 3-channel or 4-channel images sequence. All
     * images should have the same type and size.
     * @param dst Output image with the same size and type as srcImgs images.
     * @param imgToDenoiseIndex Target image to denoise index in srcImgs sequence
     * @param temporalWindowSize Number of surrounding images to use for target image denoising. Should
     * be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to
     * imgToDenoiseIndex + temporalWindowSize / 2 from srcImgs will be used to denoise
     * srcImgs[imgToDenoiseIndex] image.
     */
    public static void fastNlMeansDenoisingMulti(List<Mat> srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize) {
        Mat srcMat = Converters.vector_Mat_to_Mat(srcImgs);
        fastNlMeansDenoisingMulti_3(srcMat.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize);
    }


    //
    // C++:  void cv::fastNlMeansDenoisingMulti(vector_Mat srcImgs, Mat& dst, int imgToDenoiseIndex, int temporalWindowSize, vector_float h, int templateWindowSize = 7, int searchWindowSize = 21, int normType = NORM_L2)
    //

    /**
     * Modification of fastNlMeansDenoising function for image sequences where consecutive images have
     * been captured in a small period of time, for example video. This version of the function is for
     * grayscale images or for manual manipulation with colorspaces. See CITE: Buades2005DenoisingIS
     * for more details
     * (open access [here](https://static.aminer.org/pdf/PDF/000/317/196/spatio_temporal_wiener_filtering_of_image_sequences_using_a_parametric.pdf)).
     *
     * @param srcImgs Input 8-bit or 16-bit (only with NORM_L1) 1-channel, 2-channel, 3-channel or
     * 4-channel images sequence. All images should have the same type and size.
     * @param dst Output image with the same size and type as srcImgs images.
     * @param imgToDenoiseIndex Target image to denoise index in srcImgs sequence
     * @param temporalWindowSize Number of surrounding images to use for target image denoising. Should
     * be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to
     * imgToDenoiseIndex + temporalWindowSize / 2 from srcImgs will be used to denoise
     * srcImgs[imgToDenoiseIndex] image.
     * @param h Array of parameters regulating filter strength, either one parameter applied to all
     * channels or one per channel in dst. A big h value removes noise more thoroughly but also removes
     * image details; a smaller h value preserves details but also preserves some noise.
     * @param templateWindowSize Size in pixels of the template patch that is used to compute weights.
     * Should be odd. Recommended value 7 pixels.
     * @param searchWindowSize Size in pixels of the window that is used to compute weighted average
     * for a given pixel. Should be odd. Affects performance linearly: greater searchWindowSize -
     * greater denoising time. Recommended value 21 pixels.
     * @param normType Type of norm used for weight calculation. Can be either NORM_L2 or NORM_L1
     */
    public static void fastNlMeansDenoisingMulti(List<Mat> srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize, MatOfFloat h, int templateWindowSize, int searchWindowSize, int normType) {
        Mat srcMat = Converters.vector_Mat_to_Mat(srcImgs);
        fastNlMeansDenoisingMulti_4(srcMat.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize, h.nativeObj, templateWindowSize, searchWindowSize, normType);
    }

    /**
     * Modification of fastNlMeansDenoising function for image sequences where consecutive images have
     * been captured in a small period of time, for example video. This version of the function is for
     * grayscale images or for manual manipulation with colorspaces. See CITE: Buades2005DenoisingIS
     * for more details
     * (open access [here](https://static.aminer.org/pdf/PDF/000/317/196/spatio_temporal_wiener_filtering_of_image_sequences_using_a_parametric.pdf)).
     *
     * This overload uses the native default norm type (NORM_L2).
     *
     * @param srcImgs Input 8-bit or 16-bit (only with NORM_L1) 1-channel, 2-channel, 3-channel or
     * 4-channel images sequence. All images should have the same type and size.
     * @param dst Output image with the same size and type as srcImgs images.
     * @param imgToDenoiseIndex Target image to denoise index in srcImgs sequence
     * @param temporalWindowSize Number of surrounding images to use for target image denoising. Should
     * be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to
     * imgToDenoiseIndex + temporalWindowSize / 2 from srcImgs will be used to denoise
     * srcImgs[imgToDenoiseIndex] image.
     * @param h Array of parameters regulating filter strength, either one parameter applied to all
     * channels or one per channel in dst. A big h value removes noise more thoroughly but also removes
     * image details; a smaller h value preserves details but also preserves some noise.
     * @param templateWindowSize Size in pixels of the template patch that is used to compute weights.
     * Should be odd. Recommended value 7 pixels.
     * @param searchWindowSize Size in pixels of the window that is used to compute weighted average
     * for a given pixel. Should be odd. Affects performance linearly: greater searchWindowSize -
     * greater denoising time. Recommended value 21 pixels.
     */
    public static void fastNlMeansDenoisingMulti(List<Mat> srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize, MatOfFloat h, int templateWindowSize, int searchWindowSize) {
        Mat srcMat = Converters.vector_Mat_to_Mat(srcImgs);
        fastNlMeansDenoisingMulti_5(srcMat.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize, h.nativeObj, templateWindowSize, searchWindowSize);
    }

    /**
     * Modification of fastNlMeansDenoising function for image sequences where consecutive images have
     * been captured in a small period of time, for example video. This version of the function is for
     * grayscale images or for manual manipulation with colorspaces. See CITE: Buades2005DenoisingIS
     * for more details
     * (open access [here](https://static.aminer.org/pdf/PDF/000/317/196/spatio_temporal_wiener_filtering_of_image_sequences_using_a_parametric.pdf)).
     *
     * This overload uses the native defaults for the search window size (21 pixels) and norm type
     * (NORM_L2).
     *
     * @param srcImgs Input 8-bit or 16-bit (only with NORM_L1) 1-channel, 2-channel, 3-channel or
     * 4-channel images sequence. All images should have the same type and size.
     * @param dst Output image with the same size and type as srcImgs images.
     * @param imgToDenoiseIndex Target image to denoise index in srcImgs sequence
     * @param temporalWindowSize Number of surrounding images to use for target image denoising. Should
     * be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to
     * imgToDenoiseIndex + temporalWindowSize / 2 from srcImgs will be used to denoise
     * srcImgs[imgToDenoiseIndex] image.
     * @param h Array of parameters regulating filter strength, either one parameter applied to all
     * channels or one per channel in dst. A big h value removes noise more thoroughly but also removes
     * image details; a smaller h value preserves details but also preserves some noise.
     * @param templateWindowSize Size in pixels of the template patch that is used to compute weights.
     * Should be odd. Recommended value 7 pixels.
     */
    public static void fastNlMeansDenoisingMulti(List<Mat> srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize, MatOfFloat h, int templateWindowSize) {
        Mat srcMat = Converters.vector_Mat_to_Mat(srcImgs);
        fastNlMeansDenoisingMulti_6(srcMat.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize, h.nativeObj, templateWindowSize);
    }

    /**
     * Modification of fastNlMeansDenoising function for image sequences where consecutive images have
     * been captured in a small period of time, for example video. This version of the function is for
     * grayscale images or for manual manipulation with colorspaces. See CITE: Buades2005DenoisingIS
     * for more details
     * (open access [here](https://static.aminer.org/pdf/PDF/000/317/196/spatio_temporal_wiener_filtering_of_image_sequences_using_a_parametric.pdf)).
     *
     * This overload uses the native defaults for the template window size (7 pixels), search window
     * size (21 pixels) and norm type (NORM_L2).
     *
     * @param srcImgs Input 8-bit or 16-bit (only with NORM_L1) 1-channel, 2-channel, 3-channel or
     * 4-channel images sequence. All images should have the same type and size.
     * @param dst Output image with the same size and type as srcImgs images.
     * @param imgToDenoiseIndex Target image to denoise index in srcImgs sequence
     * @param temporalWindowSize Number of surrounding images to use for target image denoising. Should
     * be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to
     * imgToDenoiseIndex + temporalWindowSize / 2 from srcImgs will be used to denoise
     * srcImgs[imgToDenoiseIndex] image.
     * @param h Array of parameters regulating filter strength, either one parameter applied to all
     * channels or one per channel in dst. A big h value removes noise more thoroughly but also removes
     * image details; a smaller h value preserves details but also preserves some noise.
     */
    public static void fastNlMeansDenoisingMulti(List<Mat> srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize, MatOfFloat h) {
        Mat srcMat = Converters.vector_Mat_to_Mat(srcImgs);
        fastNlMeansDenoisingMulti_7(srcMat.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize, h.nativeObj);
    }


    //
    // C++:  void cv::fastNlMeansDenoisingColoredMulti(vector_Mat srcImgs, Mat& dst, int imgToDenoiseIndex, int temporalWindowSize, float h = 3, float hColor = 3, int templateWindowSize = 7, int searchWindowSize = 21)
    //

    /**
     * Modification of fastNlMeansDenoisingMulti function for colored images sequences
     *
     * The function converts images to CIELAB colorspace and then separately denoises the L and AB
     * components with the given h parameters using the fastNlMeansDenoisingMulti function.
     *
     * @param srcImgs Input 8-bit 3-channel images sequence. All images should have the same type and
     * size.
     * @param dst Output image with the same size and type as srcImgs images.
     * @param imgToDenoiseIndex Target image to denoise index in srcImgs sequence
     * @param temporalWindowSize Number of surrounding images to use for target image denoising. Should
     * be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to
     * imgToDenoiseIndex + temporalWindowSize / 2 from srcImgs will be used to denoise
     * srcImgs[imgToDenoiseIndex] image.
     * @param h Parameter regulating filter strength for the luminance component. A bigger h value
     * removes noise more thoroughly but also removes image details; a smaller h value preserves
     * details but also preserves some noise.
     * @param hColor The same as h but for color components.
     * @param templateWindowSize Size in pixels of the template patch that is used to compute weights.
     * Should be odd. Recommended value 7 pixels.
     * @param searchWindowSize Size in pixels of the window that is used to compute weighted average
     * for a given pixel. Should be odd. Affects performance linearly: greater searchWindowSize -
     * greater denoising time. Recommended value 21 pixels.
     */
    public static void fastNlMeansDenoisingColoredMulti(List<Mat> srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize, float h, float hColor, int templateWindowSize, int searchWindowSize) {
        Mat srcMat = Converters.vector_Mat_to_Mat(srcImgs);
        fastNlMeansDenoisingColoredMulti_0(srcMat.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize, h, hColor, templateWindowSize, searchWindowSize);
    }

    /**
     * Modification of fastNlMeansDenoisingMulti function for colored images sequences
     *
     * The function converts images to CIELAB colorspace and then separately denoises the L and AB
     * components with the given h parameters using the fastNlMeansDenoisingMulti function. This
     * overload uses the native default search window size (21 pixels).
     *
     * @param srcImgs Input 8-bit 3-channel images sequence. All images should have the same type and
     * size.
     * @param dst Output image with the same size and type as srcImgs images.
     * @param imgToDenoiseIndex Target image to denoise index in srcImgs sequence
     * @param temporalWindowSize Number of surrounding images to use for target image denoising. Should
     * be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to
     * imgToDenoiseIndex + temporalWindowSize / 2 from srcImgs will be used to denoise
     * srcImgs[imgToDenoiseIndex] image.
     * @param h Parameter regulating filter strength for the luminance component. A bigger h value
     * removes noise more thoroughly but also removes image details; a smaller h value preserves
     * details but also preserves some noise.
     * @param hColor The same as h but for color components.
     * @param templateWindowSize Size in pixels of the template patch that is used to compute weights.
     * Should be odd. Recommended value 7 pixels.
     */
    public static void fastNlMeansDenoisingColoredMulti(List<Mat> srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize, float h, float hColor, int templateWindowSize) {
        Mat srcMat = Converters.vector_Mat_to_Mat(srcImgs);
        fastNlMeansDenoisingColoredMulti_1(srcMat.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize, h, hColor, templateWindowSize);
    }

    /**
     * Modification of fastNlMeansDenoisingMulti function for colored images sequences
     *
     * The function converts images to CIELAB colorspace and then separately denoises the L and AB
     * components with the given h parameters using the fastNlMeansDenoisingMulti function. This
     * overload uses the native default template window size (7 pixels) and search window size
     * (21 pixels).
     *
     * @param srcImgs Input 8-bit 3-channel images sequence. All images should have the same type and
     * size.
     * @param dst Output image with the same size and type as srcImgs images.
     * @param imgToDenoiseIndex Target image to denoise index in srcImgs sequence
     * @param temporalWindowSize Number of surrounding images to use for target image denoising. Should
     * be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to
     * imgToDenoiseIndex + temporalWindowSize / 2 from srcImgs will be used to denoise
     * srcImgs[imgToDenoiseIndex] image.
     * @param h Parameter regulating filter strength for the luminance component. A bigger h value
     * removes noise more thoroughly but also removes image details; a smaller h value preserves
     * details but also preserves some noise.
     * @param hColor The same as h but for color components.
     */
    public static void fastNlMeansDenoisingColoredMulti(List<Mat> srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize, float h, float hColor) {
        Mat srcMat = Converters.vector_Mat_to_Mat(srcImgs);
        fastNlMeansDenoisingColoredMulti_2(srcMat.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize, h, hColor);
    }

    /**
     * Modification of fastNlMeansDenoisingMulti function for colored images sequences
     *
     * The function converts images to CIELAB colorspace and then separately denoises the L and AB
     * components with the given h parameters using the fastNlMeansDenoisingMulti function. This
     * overload uses the native defaults for hColor (3), template window size (7 pixels) and search
     * window size (21 pixels).
     *
     * @param srcImgs Input 8-bit 3-channel images sequence. All images should have the same type and
     * size.
     * @param dst Output image with the same size and type as srcImgs images.
     * @param imgToDenoiseIndex Target image to denoise index in srcImgs sequence
     * @param temporalWindowSize Number of surrounding images to use for target image denoising. Should
     * be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to
     * imgToDenoiseIndex + temporalWindowSize / 2 from srcImgs will be used to denoise
     * srcImgs[imgToDenoiseIndex] image.
     * @param h Parameter regulating filter strength for the luminance component. A bigger h value
     * removes noise more thoroughly but also removes image details; a smaller h value preserves
     * details but also preserves some noise.
     */
    public static void fastNlMeansDenoisingColoredMulti(List<Mat> srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize, float h) {
        Mat srcMat = Converters.vector_Mat_to_Mat(srcImgs);
        fastNlMeansDenoisingColoredMulti_3(srcMat.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize, h);
    }

    /**
     * Modification of fastNlMeansDenoisingMulti function for colored images sequences
     *
     * The function converts images to CIELAB colorspace and then separately denoises the L and AB
     * components using the fastNlMeansDenoisingMulti function. This overload uses the native defaults
     * for h (3), hColor (3), template window size (7 pixels) and search window size (21 pixels).
     *
     * @param srcImgs Input 8-bit 3-channel images sequence. All images should have the same type and
     * size.
     * @param dst Output image with the same size and type as srcImgs images.
     * @param imgToDenoiseIndex Target image to denoise index in srcImgs sequence
     * @param temporalWindowSize Number of surrounding images to use for target image denoising. Should
     * be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to
     * imgToDenoiseIndex + temporalWindowSize / 2 from srcImgs will be used to denoise
     * srcImgs[imgToDenoiseIndex] image.
     */
    public static void fastNlMeansDenoisingColoredMulti(List<Mat> srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize) {
        Mat srcMat = Converters.vector_Mat_to_Mat(srcImgs);
        fastNlMeansDenoisingColoredMulti_4(srcMat.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize);
    }


    //
    // C++:  void cv::denoise_TVL1(vector_Mat observations, Mat result, double lambda = 1.0, int niters = 30)
    //

    /**
     * Primal-dual algorithm is an algorithm for solving special types of variational problems (that is,
     * finding a function to minimize some functional). As image denoising, in particular, may be seen
     * as a variational problem, the primal-dual algorithm can be used to perform denoising, and this is
     * exactly what is implemented.
     *
     * It should be noted that this implementation was taken from the July 2013 blog entry
     * CITE: MA13 , which also contained (slightly more general) ready-to-use source code in Python.
     * Subsequently, that code was rewritten in C++ using OpenCV by Vadim Pisarevsky at the end of
     * July 2013, and finally it was slightly adapted by later authors.
     *
     * Although a thorough discussion and justification of the algorithm involved may be found in
     * CITE: ChambolleEtAl, it might make sense to skim over it here, following CITE: MA13 . To begin
     * with, we consider 1-byte gray-level images as functions from a rectangular domain of pixels
     * (it may be seen as the set
     * \(\left\{(x,y)\in\mathbb{N}\times\mathbb{N}\mid 1\leq x\leq n,\;1\leq y\leq m\right\}\) for some
     * \(m,\;n\in\mathbb{N}\)) into \(\{0,1,\dots,255\}\). We denote the noised images as \(f_i\); with
     * this view, given some image \(x\) of the same size, we may measure how bad it is by the formula
     *
     * \(\left\|\left\|\nabla x\right\|\right\| + \lambda\sum_i\left\|\left\|x-f_i\right\|\right\|\)
     *
     * \(\|\|\cdot\|\|\) here denotes the \(L_2\)-norm; as you can see, the first addend states that we
     * want the image to be smooth (ideally, having zero gradient, thus being constant) and the second
     * states that we want the result to be close to the observations we have got. Treating \(x\) as a
     * function, this is exactly the functional we seek to minimize, and here the primal-dual algorithm
     * comes into play.
     *
     * @param observations This array should contain one or more noised versions of the image that is
     * to be restored.
     * @param result Here the denoised image will be stored. There is no need to do pre-allocation of
     * storage space, as it will be automatically allocated, if necessary.
     * @param lambda Corresponds to \(\lambda\) in the formulas above. As it is enlarged, smooth
     * (blurred) images are treated more favorably than detailed (but maybe more noised) ones. Roughly
     * speaking, as it becomes smaller, the result will be more blurred but more severe outliers will
     * be removed.
     * @param niters Number of iterations that the algorithm will run. Of course, the more iterations,
     * the better, but it is hard to quantitatively refine this statement, so just use the default and
     * increase it if the results are poor.
     */
    public static void denoise_TVL1(List<Mat> observations, Mat result, double lambda, int niters) {
        Mat obsMat = Converters.vector_Mat_to_Mat(observations);
        denoise_TVL1_0(obsMat.nativeObj, result.nativeObj, lambda, niters);
    }

    /**
     * Primal-dual algorithm is an algorithm for solving special types of variational problems (that is,
     * finding a function to minimize some functional). As image denoising, in particular, may be seen
     * as a variational problem, the primal-dual algorithm can be used to perform denoising, and this is
     * exactly what is implemented.
     *
     * It should be noted that this implementation was taken from the July 2013 blog entry
     * CITE: MA13 , which also contained (slightly more general) ready-to-use source code in Python.
     * Subsequently, that code was rewritten in C++ using OpenCV by Vadim Pisarevsky at the end of
     * July 2013, and finally it was slightly adapted by later authors.
     *
     * Although a thorough discussion and justification of the algorithm involved may be found in
     * CITE: ChambolleEtAl, it might make sense to skim over it here, following CITE: MA13 . To begin
     * with, we consider 1-byte gray-level images as functions from a rectangular domain of pixels
     * (it may be seen as the set
     * \(\left\{(x,y)\in\mathbb{N}\times\mathbb{N}\mid 1\leq x\leq n,\;1\leq y\leq m\right\}\) for some
     * \(m,\;n\in\mathbb{N}\)) into \(\{0,1,\dots,255\}\). We denote the noised images as \(f_i\); with
     * this view, given some image \(x\) of the same size, we may measure how bad it is by the formula
     *
     * \(\left\|\left\|\nabla x\right\|\right\| + \lambda\sum_i\left\|\left\|x-f_i\right\|\right\|\)
     *
     * \(\|\|\cdot\|\|\) here denotes the \(L_2\)-norm; as you can see, the first addend states that we
     * want the image to be smooth (ideally, having zero gradient, thus being constant) and the second
     * states that we want the result to be close to the observations we have got. Treating \(x\) as a
     * function, this is exactly the functional we seek to minimize, and here the primal-dual algorithm
     * comes into play.
     *
     * This overload uses the native default number of iterations (niters = 30).
     *
     * @param observations This array should contain one or more noised versions of the image that is
     * to be restored.
     * @param result Here the denoised image will be stored. There is no need to do pre-allocation of
     * storage space, as it will be automatically allocated, if necessary.
     * @param lambda Corresponds to \(\lambda\) in the formulas above. As it is enlarged, smooth
     * (blurred) images are treated more favorably than detailed (but maybe more noised) ones. Roughly
     * speaking, as it becomes smaller, the result will be more blurred but more severe outliers will
     * be removed.
     */
    public static void denoise_TVL1(List<Mat> observations, Mat result, double lambda) {
        Mat obsMat = Converters.vector_Mat_to_Mat(observations);
        denoise_TVL1_1(obsMat.nativeObj, result.nativeObj, lambda);
    }

    /**
     * Primal-dual algorithm is an algorithm for solving special types of variational problems (that is,
     * finding a function to minimize some functional). As image denoising, in particular, may be seen
     * as a variational problem, the primal-dual algorithm can be used to perform denoising, and this is
     * exactly what is implemented.
     *
     * It should be noted that this implementation was taken from the July 2013 blog entry
     * CITE: MA13 , which also contained (slightly more general) ready-to-use source code in Python.
     * Subsequently, that code was rewritten in C++ using OpenCV by Vadim Pisarevsky at the end of
     * July 2013, and finally it was slightly adapted by later authors.
     *
     * Although a thorough discussion and justification of the algorithm involved may be found in
     * CITE: ChambolleEtAl, it might make sense to skim over it here, following CITE: MA13 . To begin
     * with, we consider 1-byte gray-level images as functions from a rectangular domain of pixels
     * (it may be seen as the set
     * \(\left\{(x,y)\in\mathbb{N}\times\mathbb{N}\mid 1\leq x\leq n,\;1\leq y\leq m\right\}\) for some
     * \(m,\;n\in\mathbb{N}\)) into \(\{0,1,\dots,255\}\). We denote the noised images as \(f_i\); with
     * this view, given some image \(x\) of the same size, we may measure how bad it is by the formula
     *
     * \(\left\|\left\|\nabla x\right\|\right\| + \lambda\sum_i\left\|\left\|x-f_i\right\|\right\|\)
     *
     * \(\|\|\cdot\|\|\) here denotes the \(L_2\)-norm; as you can see, the first addend states that we
     * want the image to be smooth (ideally, having zero gradient, thus being constant) and the second
     * states that we want the result to be close to the observations we have got. Treating \(x\) as a
     * function, this is exactly the functional we seek to minimize, and here the primal-dual algorithm
     * comes into play.
     *
     * This overload uses the native defaults for lambda (1.0) and the number of iterations
     * (niters = 30).
     *
     * @param observations This array should contain one or more noised versions of the image that is
     * to be restored.
     * @param result Here the denoised image will be stored. There is no need to do pre-allocation of
     * storage space, as it will be automatically allocated, if necessary.
     */
    public static void denoise_TVL1(List<Mat> observations, Mat result) {
        Mat obsMat = Converters.vector_Mat_to_Mat(observations);
        denoise_TVL1_2(obsMat.nativeObj, result.nativeObj);
    }


    //
    // C++:  Ptr_Tonemap cv::createTonemap(float gamma = 1.0f)
    //

    /**
     * Creates simple linear mapper with gamma correction
     *
     * @param gamma positive value for gamma correction. Gamma value of 1.0 implies no correction, gamma
     * equal to 2.2f is suitable for most displays.
     * Generally gamma &gt; 1 brightens the image and gamma &lt; 1 darkens it.
     * @return created Tonemap object
     */
    public static Tonemap createTonemap(float gamma) {
        return Tonemap.__fromPtr__(createTonemap_0(gamma));
    }

    /**
     * Creates simple linear mapper with gamma correction
     *
     * The gamma parameter defaults to 1.0f, which implies no correction; gamma
     * equal to 2.2f is suitable for most displays.
     * Generally gamma &gt; 1 brightens the image and gamma &lt; 1 darkens it.
     * @return created Tonemap object
     */
    public static Tonemap createTonemap() {
        return Tonemap.__fromPtr__(createTonemap_1());
    }


    //
    // C++:  Ptr_TonemapDrago cv::createTonemapDrago(float gamma = 1.0f, float saturation = 1.0f, float bias = 0.85f)
    //

    /**
     * Creates TonemapDrago object
     *
     * @param gamma gamma value for gamma correction. See createTonemap
     * @param saturation positive saturation enhancement value. 1.0 preserves saturation, values greater
     * than 1 increase saturation and values less than 1 decrease it.
     * @param bias value for bias function in [0, 1] range. Values from 0.7 to 0.9 usually give best
     * results, default value is 0.85.
     * @return created TonemapDrago object
     */
    public static TonemapDrago createTonemapDrago(float gamma, float saturation, float bias) {
        return TonemapDrago.__fromPtr__(createTonemapDrago_0(gamma, saturation, bias));
    }

    /**
     * Creates TonemapDrago object
     *
     * @param gamma gamma value for gamma correction. See createTonemap
     * @param saturation positive saturation enhancement value. 1.0 preserves saturation, values greater
     * than 1 increase saturation and values less than 1 decrease it.
     * The bias parameter defaults to 0.85f; values from 0.7 to 0.9 usually give best results.
     * @return created TonemapDrago object
     */
    public static TonemapDrago createTonemapDrago(float gamma, float saturation) {
        return TonemapDrago.__fromPtr__(createTonemapDrago_1(gamma, saturation));
    }

    /**
     * Creates TonemapDrago object
     *
     * @param gamma gamma value for gamma correction. See createTonemap
     * Saturation defaults to 1.0f (preserving saturation); bias defaults to 0.85f, with values
     * from 0.7 to 0.9 usually giving best results.
     * @return created TonemapDrago object
     */
    public static TonemapDrago createTonemapDrago(float gamma) {
        return TonemapDrago.__fromPtr__(createTonemapDrago_2(gamma));
    }

    /**
     * Creates TonemapDrago object
     *
     * All parameters use their defaults: gamma 1.0f, saturation 1.0f and bias 0.85f.
     * @return created TonemapDrago object
     */
    public static TonemapDrago createTonemapDrago() {
        return TonemapDrago.__fromPtr__(createTonemapDrago_3());
    }


    //
    // C++:  Ptr_TonemapReinhard cv::createTonemapReinhard(float gamma = 1.0f, float intensity = 0.0f, float light_adapt = 1.0f, float color_adapt = 0.0f)
    //

    /**
     * Creates TonemapReinhard object
     *
     * @param gamma gamma value for gamma correction. See createTonemap
     * @param intensity result intensity in [-8, 8] range. Greater intensity produces brighter results.
     * @param light_adapt light adaptation in [0, 1] range. If 1 adaptation is based only on pixel
     * value, if 0 it's global, otherwise it's a weighted mean of these two cases.
     * @param color_adapt chromatic adaptation in [0, 1] range. If 1 channels are treated independently,
     * if 0 adaptation level is the same for each channel.
     * @return created TonemapReinhard object
     */
    public static TonemapReinhard createTonemapReinhard(float gamma, float intensity, float light_adapt, float color_adapt) {
        return TonemapReinhard.__fromPtr__(createTonemapReinhard_0(gamma, intensity, light_adapt, color_adapt));
    }

    /**
     * Creates TonemapReinhard object
     *
     * @param gamma gamma value for gamma correction. See createTonemap
     * @param intensity result intensity in [-8, 8] range. Greater intensity produces brighter results.
     * @param light_adapt light adaptation in [0, 1] range. If 1 adaptation is based only on pixel
     * value, if 0 it's global, otherwise it's a weighted mean of these two cases.
     * The color_adapt parameter defaults to 0.0f (same adaptation level for each channel).
     * @return created TonemapReinhard object
     */
    public static TonemapReinhard createTonemapReinhard(float gamma, float intensity, float light_adapt) {
        return TonemapReinhard.__fromPtr__(createTonemapReinhard_1(gamma, intensity, light_adapt));
    }

    /**
     * Creates TonemapReinhard object
     *
     * @param gamma gamma value for gamma correction. See createTonemap
     * @param intensity result intensity in [-8, 8] range. Greater intensity produces brighter results.
     * light_adapt defaults to 1.0f (adaptation based only on pixel value) and color_adapt to 0.0f
     * (same adaptation level for each channel).
     * @return created TonemapReinhard object
     */
    public static TonemapReinhard createTonemapReinhard(float gamma, float intensity) {
        return TonemapReinhard.__fromPtr__(createTonemapReinhard_2(gamma, intensity));
    }

    /**
     * Creates TonemapReinhard object
     *
     * @param gamma gamma value for gamma correction. See createTonemap
     * intensity defaults to 0.0f, light_adapt to 1.0f and color_adapt to 0.0f.
     * @return created TonemapReinhard object
     */
    public static TonemapReinhard createTonemapReinhard(float gamma) {
        return TonemapReinhard.__fromPtr__(createTonemapReinhard_3(gamma));
    }

    /**
     * Creates TonemapReinhard object
     *
     * All parameters use their defaults: gamma 1.0f, intensity 0.0f, light_adapt 1.0f and
     * color_adapt 0.0f.
     * @return created TonemapReinhard object
     */
    public static TonemapReinhard createTonemapReinhard() {
        return TonemapReinhard.__fromPtr__(createTonemapReinhard_4());
    }


    //
    // C++:  Ptr_TonemapMantiuk cv::createTonemapMantiuk(float gamma = 1.0f, float scale = 0.7f, float saturation = 1.0f)
    //

    /**
     * Creates TonemapMantiuk object
     *
     * @param gamma gamma value for gamma correction. See createTonemap
     * @param scale contrast scale factor. HVS response is multiplied by this parameter, thus compressing
     * dynamic range. Values from 0.6 to 0.9 produce best results.
     * @param saturation saturation enhancement value. See createTonemapDrago
     * @return created TonemapMantiuk object
     */
    public static TonemapMantiuk createTonemapMantiuk(float gamma, float scale, float saturation) {
        return TonemapMantiuk.__fromPtr__(createTonemapMantiuk_0(gamma, scale, saturation));
    }

    /**
     * Creates TonemapMantiuk object
     *
     * @param gamma gamma value for gamma correction. See createTonemap
     * @param scale contrast scale factor. HVS response is multiplied by this parameter, thus compressing
     * dynamic range. Values from 0.6 to 0.9 produce best results.
     * The saturation parameter defaults to 1.0f. See createTonemapDrago
     * @return created TonemapMantiuk object
     */
    public static TonemapMantiuk createTonemapMantiuk(float gamma, float scale) {
        return TonemapMantiuk.__fromPtr__(createTonemapMantiuk_1(gamma, scale));
    }

    /**
     * Creates TonemapMantiuk object
     *
     * @param gamma gamma value for gamma correction. See createTonemap
     * scale defaults to 0.7f (values from 0.6 to 0.9 produce best results) and saturation to 1.0f.
     * @return created TonemapMantiuk object
     */
    public static TonemapMantiuk createTonemapMantiuk(float gamma) {
        return TonemapMantiuk.__fromPtr__(createTonemapMantiuk_2(gamma));
    }

    /**
     * Creates TonemapMantiuk object
     *
     * All parameters use their defaults: gamma 1.0f, scale 0.7f and saturation 1.0f.
     * @return created TonemapMantiuk object
     */
    public static TonemapMantiuk createTonemapMantiuk() {
        return TonemapMantiuk.__fromPtr__(createTonemapMantiuk_3());
    }


    //
    // C++:  Ptr_AlignMTB cv::createAlignMTB(int max_bits = 6, int exclude_range = 4, bool cut = true)
    //

    /**
     * Creates AlignMTB object
     *
     * @param max_bits logarithm to the base 2 of maximal shift in each dimension. Values of 5 and 6 are
     * usually good enough (31 and 63 pixels shift respectively).
     * @param exclude_range range for exclusion bitmap that is constructed to suppress noise around the
     * median value.
     * @param cut if true cuts images, otherwise fills the new regions with zeros.
     * @return created AlignMTB object
     */
    public static AlignMTB createAlignMTB(int max_bits, int exclude_range, boolean cut) {
        return AlignMTB.__fromPtr__(createAlignMTB_0(max_bits, exclude_range, cut));
    }

    /**
     * Creates AlignMTB object
     *
     * @param max_bits logarithm to the base 2 of maximal shift in each dimension. Values of 5 and 6 are
     * usually good enough (31 and 63 pixels shift respectively).
     * @param exclude_range range for exclusion bitmap that is constructed to suppress noise around the
     * median value.
     * The cut parameter defaults to true (images are cut rather than zero-filled).
     * @return created AlignMTB object
     */
    public static AlignMTB createAlignMTB(int max_bits, int exclude_range) {
        return AlignMTB.__fromPtr__(createAlignMTB_1(max_bits, exclude_range));
    }

    /**
     * Creates AlignMTB object
     *
     * @param max_bits logarithm to the base 2 of maximal shift in each dimension. Values of 5 and 6 are
     * usually good enough (31 and 63 pixels shift respectively).
     * exclude_range defaults to 4 and cut to true.
     * @return created AlignMTB object
     */
    public static AlignMTB createAlignMTB(int max_bits) {
        return AlignMTB.__fromPtr__(createAlignMTB_2(max_bits));
    }

    /**
     * Creates AlignMTB object
     *
     * All parameters use their defaults: max_bits 6, exclude_range 4 and cut true.
     * @return created AlignMTB object
     */
    public static AlignMTB createAlignMTB() {
        return AlignMTB.__fromPtr__(createAlignMTB_3());
    }


    //
    // C++:  Ptr_CalibrateDebevec cv::createCalibrateDebevec(int samples = 70, float lambda = 10.0f, bool random = false)
    //

    /**
     * Creates CalibrateDebevec object
     *
     * @param samples number of pixel locations to use
     * @param lambda smoothness term weight. Greater values produce smoother results, but can alter the
     * response.
     * @param random if true sample pixel locations are chosen at random, otherwise they form a
     * rectangular grid.
     * @return created CalibrateDebevec object
     */
    public static CalibrateDebevec createCalibrateDebevec(int samples, float lambda, boolean random) {
        return CalibrateDebevec.__fromPtr__(createCalibrateDebevec_0(samples, lambda, random));
    }

    /**
     * Creates CalibrateDebevec object
     *
     * @param samples number of pixel locations to use
     * @param lambda smoothness term weight. Greater values produce smoother results, but can alter the
     * response.
     * The random parameter defaults to false, so sample locations form a rectangular grid.
     * @return created CalibrateDebevec object
     */
    public static CalibrateDebevec createCalibrateDebevec(int samples, float lambda) {
        return CalibrateDebevec.__fromPtr__(createCalibrateDebevec_1(samples, lambda));
    }

    /**
     * Creates CalibrateDebevec object
     *
     * @param samples number of pixel locations to use
     * lambda defaults to 10.0f and random to false (rectangular grid).
     * @return created CalibrateDebevec object
     */
    public static CalibrateDebevec createCalibrateDebevec(int samples) {
        return CalibrateDebevec.__fromPtr__(createCalibrateDebevec_2(samples));
    }

    /**
     * Creates CalibrateDebevec object
     *
     * All parameters use their defaults: samples 70, lambda 10.0f and random false.
     * @return created CalibrateDebevec object
     */
    public static CalibrateDebevec createCalibrateDebevec() {
        return CalibrateDebevec.__fromPtr__(createCalibrateDebevec_3());
    }


    //
    // C++:  Ptr_CalibrateRobertson cv::createCalibrateRobertson(int max_iter = 30, float threshold = 0.01f)
    //

    /**
     * Creates CalibrateRobertson object
     *
     * @param max_iter maximal number of Gauss-Seidel solver iterations.
     * @param threshold target difference between results of two successive steps of the minimization.
     * @return created CalibrateRobertson object
     */
    public static CalibrateRobertson createCalibrateRobertson(int max_iter, float threshold) {
        return CalibrateRobertson.__fromPtr__(createCalibrateRobertson_0(max_iter, threshold));
    }

    /**
     * Creates CalibrateRobertson object
     *
     * @param max_iter maximal number of Gauss-Seidel solver iterations.
     * The convergence threshold defaults to 0.01f.
     * @return created CalibrateRobertson object
     */
    public static CalibrateRobertson createCalibrateRobertson(int max_iter) {
        return CalibrateRobertson.__fromPtr__(createCalibrateRobertson_1(max_iter));
    }

    /**
     * Creates CalibrateRobertson object
     *
     * All parameters use their defaults: max_iter 30 and threshold 0.01f.
     * @return created CalibrateRobertson object
     */
    public static CalibrateRobertson createCalibrateRobertson() {
        return CalibrateRobertson.__fromPtr__(createCalibrateRobertson_2());
    }


    //
    // C++:  Ptr_MergeDebevec cv::createMergeDebevec()
    //

    /**
     * Creates MergeDebevec object
     * @return created MergeDebevec object
     */
    public static MergeDebevec createMergeDebevec() {
        return MergeDebevec.__fromPtr__(createMergeDebevec_0());
    }


    //
    // C++:  Ptr_MergeMertens cv::createMergeMertens(float contrast_weight = 1.0f, float saturation_weight = 1.0f, float exposure_weight = 0.0f)
    //

    /**
     * Creates MergeMertens object
     *
     * @param contrast_weight contrast measure weight. See MergeMertens.
     * @param saturation_weight saturation measure weight
     * @param exposure_weight well-exposedness measure weight
     * @return created MergeMertens object
     */
    public static MergeMertens createMergeMertens(float contrast_weight, float saturation_weight, float exposure_weight) {
        return MergeMertens.__fromPtr__(createMergeMertens_0(contrast_weight, saturation_weight, exposure_weight));
    }

    /**
     * Creates MergeMertens object
     *
     * @param contrast_weight contrast measure weight. See MergeMertens.
     * @param saturation_weight saturation measure weight
     * The exposure (well-exposedness) weight defaults to 0.0f.
     * @return created MergeMertens object
     */
    public static MergeMertens createMergeMertens(float contrast_weight, float saturation_weight) {
        return MergeMertens.__fromPtr__(createMergeMertens_1(contrast_weight, saturation_weight));
    }

    /**
     * Creates MergeMertens object
     *
     * @param contrast_weight contrast measure weight. See MergeMertens.
     * The saturation weight defaults to 1.0f and the exposure weight to 0.0f.
     * @return created MergeMertens object
     */
    public static MergeMertens createMergeMertens(float contrast_weight) {
        return MergeMertens.__fromPtr__(createMergeMertens_2(contrast_weight));
    }

    /**
     * Creates MergeMertens object
     *
     * All weights use their defaults: contrast 1.0f, saturation 1.0f and exposure 0.0f.
     * @return created MergeMertens object
     */
    public static MergeMertens createMergeMertens() {
        return MergeMertens.__fromPtr__(createMergeMertens_3());
    }


    //
    // C++:  Ptr_MergeRobertson cv::createMergeRobertson()
    //

    /**
     * Creates MergeRobertson object
     * @return created MergeRobertson object
     */
    public static MergeRobertson createMergeRobertson() {
        return MergeRobertson.__fromPtr__(createMergeRobertson_0());
    }


    //
    // C++:  void cv::decolor(Mat src, Mat& grayscale, Mat& color_boost)
    //

    /**
     * Transforms a color image to a grayscale image. It is a basic tool in digital printing, stylized
     * black-and-white photograph rendering, and in many single channel image processing applications
     * CITE: CL12 .
     *
     * @param src Input 8-bit 3-channel image.
     * @param grayscale Output 8-bit 1-channel image.
     * @param color_boost Output 8-bit 3-channel image.
     *
     * This function is to be applied on color images.
     */
    public static void decolor(Mat src, Mat grayscale, Mat color_boost) {
        // Mats cross the JNI boundary as raw native pointers; outputs are written in place.
        decolor_0(src.nativeObj, grayscale.nativeObj, color_boost.nativeObj);
    }


    //
    // C++:  void cv::seamlessClone(Mat src, Mat dst, Mat mask, Point p, Mat& blend, int flags)
    //

    /**
     * Image editing tasks concern either global changes (color/intensity corrections, filters,
     * deformations) or local changes concerned to a selection. Here we are interested in achieving local
     * changes, ones that are restricted to a region manually selected (ROI), in a seamless and effortless
     * manner. The extent of the changes ranges from slight distortions to complete replacement by novel
     * content CITE: PM03 .
     *
     * @param src Input 8-bit 3-channel image.
     * @param dst Input 8-bit 3-channel image.
     * @param mask Input 8-bit 1 or 3-channel image.
     * @param p Point in dst image where object is placed.
     * @param blend Output image with the same size and type as dst.
     * @param flags Cloning method that could be cv::NORMAL_CLONE, cv::MIXED_CLONE or cv::MONOCHROME_TRANSFER
     */
    public static void seamlessClone(Mat src, Mat dst, Mat mask, Point p, Mat blend, int flags) {
        // The Point is decomposed into its x/y coordinates for the native call.
        seamlessClone_0(src.nativeObj, dst.nativeObj, mask.nativeObj, p.x, p.y, blend.nativeObj, flags);
    }


    //
    // C++:  void cv::colorChange(Mat src, Mat mask, Mat& dst, float red_mul = 1.0f, float green_mul = 1.0f, float blue_mul = 1.0f)
    //

    /**
     * Given an original color image, two differently colored versions of this image can be mixed
     * seamlessly.
     *
     * @param src Input 8-bit 3-channel image.
     * @param mask Input 8-bit 1 or 3-channel image.
     * @param dst Output image with the same size and type as src .
     * @param red_mul R-channel multiply factor.
     * @param green_mul G-channel multiply factor.
     * @param blue_mul B-channel multiply factor.
     *
     * Each multiplication factor should lie between 0.5 and 2.5.
     */
    public static void colorChange(Mat src, Mat mask, Mat dst, float red_mul, float green_mul, float blue_mul) {
        colorChange_0(src.nativeObj, mask.nativeObj, dst.nativeObj, red_mul, green_mul, blue_mul);
    }

    /**
     * Given an original color image, two differently colored versions of this image can be mixed
     * seamlessly.
     *
     * @param src Input 8-bit 3-channel image.
     * @param mask Input 8-bit 1 or 3-channel image.
     * @param dst Output image with the same size and type as src .
     * @param red_mul R-channel multiply factor.
     * @param green_mul G-channel multiply factor.
     * The B-channel factor defaults to 1.0f.
     *
     * Each multiplication factor should lie between 0.5 and 2.5.
     */
    public static void colorChange(Mat src, Mat mask, Mat dst, float red_mul, float green_mul) {
        colorChange_1(src.nativeObj, mask.nativeObj, dst.nativeObj, red_mul, green_mul);
    }

    /**
     * Given an original color image, two differently colored versions of this image can be mixed
     * seamlessly.
     *
     * @param src Input 8-bit 3-channel image.
     * @param mask Input 8-bit 1 or 3-channel image.
     * @param dst Output image with the same size and type as src .
     * @param red_mul R-channel multiply factor.
     * The G- and B-channel factors default to 1.0f.
     *
     * Each multiplication factor should lie between 0.5 and 2.5.
     */
    public static void colorChange(Mat src, Mat mask, Mat dst, float red_mul) {
        colorChange_2(src.nativeObj, mask.nativeObj, dst.nativeObj, red_mul);
    }

    /**
     * Given an original color image, two differently colored versions of this image can be mixed
     * seamlessly.
     *
     * @param src Input 8-bit 3-channel image.
     * @param mask Input 8-bit 1 or 3-channel image.
     * @param dst Output image with the same size and type as src .
     * All channel multiply factors default to 1.0f (no color change).
     *
     * Each multiplication factor should lie between 0.5 and 2.5.
     */
    public static void colorChange(Mat src, Mat mask, Mat dst) {
        colorChange_3(src.nativeObj, mask.nativeObj, dst.nativeObj);
    }


    //
    // C++:  void cv::illuminationChange(Mat src, Mat mask, Mat& dst, float alpha = 0.2f, float beta = 0.4f)
    //

    /**
     * Applying an appropriate non-linear transformation to the gradient field inside the selection and
     * then integrating back with a Poisson solver, modifies locally the apparent illumination of an image.
     *
     * @param src Input 8-bit 3-channel image.
     * @param mask Input 8-bit 1 or 3-channel image.
     * @param dst Output image with the same size and type as src.
     * @param alpha Value ranges between 0-2.
     * @param beta Value ranges between 0-2.
     *
     * This is useful to highlight under-exposed foreground objects or to reduce specular reflections.
     */
    public static void illuminationChange(Mat src, Mat mask, Mat dst, float alpha, float beta) {
        illuminationChange_0(src.nativeObj, mask.nativeObj, dst.nativeObj, alpha, beta);
    }

    /**
     * Applying an appropriate non-linear transformation to the gradient field inside the selection and
     * then integrating back with a Poisson solver, modifies locally the apparent illumination of an image.
     *
     * @param src Input 8-bit 3-channel image.
     * @param mask Input 8-bit 1 or 3-channel image.
     * @param dst Output image with the same size and type as src.
     * @param alpha Value ranges between 0-2.
     * The beta parameter defaults to 0.4f.
     *
     * This is useful to highlight under-exposed foreground objects or to reduce specular reflections.
     */
    public static void illuminationChange(Mat src, Mat mask, Mat dst, float alpha) {
        illuminationChange_1(src.nativeObj, mask.nativeObj, dst.nativeObj, alpha);
    }

    /**
     * Applying an appropriate non-linear transformation to the gradient field inside the selection and
     * then integrating back with a Poisson solver, modifies locally the apparent illumination of an image.
     *
     * @param src Input 8-bit 3-channel image.
     * @param mask Input 8-bit 1 or 3-channel image.
     * @param dst Output image with the same size and type as src.
     * alpha defaults to 0.2f and beta to 0.4f.
     *
     * This is useful to highlight under-exposed foreground objects or to reduce specular reflections.
     */
    public static void illuminationChange(Mat src, Mat mask, Mat dst) {
        illuminationChange_2(src.nativeObj, mask.nativeObj, dst.nativeObj);
    }


    //
    // C++:  void cv::textureFlattening(Mat src, Mat mask, Mat& dst, float low_threshold = 30, float high_threshold = 45, int kernel_size = 3)
    //

    /**
     * By retaining only the gradients at edge locations, before integrating with the Poisson solver, one
     * washes out the texture of the selected region, giving its contents a flat aspect. Here Canny Edge %Detector is used.
     *
     * @param src Input 8-bit 3-channel image.
     * @param mask Input 8-bit 1 or 3-channel image.
     * @param dst Output image with the same size and type as src.
     * @param low_threshold %Range from 0 to 100.
     * @param high_threshold Value &gt; 100.
     * @param kernel_size The size of the Sobel kernel to be used.
     *
     * <b>Note:</b>
     * The algorithm assumes that the color of the source image is close to that of the destination. This
     * assumption means that when the colors don't match, the source image color gets tinted toward the
     * color of the destination image.
     */
    public static void textureFlattening(Mat src, Mat mask, Mat dst, float low_threshold, float high_threshold, int kernel_size) {
        textureFlattening_0(src.nativeObj, mask.nativeObj, dst.nativeObj, low_threshold, high_threshold, kernel_size);
    }

    /**
     * By retaining only the gradients at edge locations, before integrating with the Poisson solver, one
     * washes out the texture of the selected region, giving its contents a flat aspect. Here Canny Edge %Detector is used.
     *
     * @param src Input 8-bit 3-channel image.
     * @param mask Input 8-bit 1 or 3-channel image.
     * @param dst Output image with the same size and type as src.
     * @param low_threshold %Range from 0 to 100.
     * @param high_threshold Value &gt; 100.
     * The Sobel kernel size defaults to 3.
     *
     * <b>Note:</b>
     * The algorithm assumes that the color of the source image is close to that of the destination. This
     * assumption means that when the colors don't match, the source image color gets tinted toward the
     * color of the destination image.
     */
    public static void textureFlattening(Mat src, Mat mask, Mat dst, float low_threshold, float high_threshold) {
        textureFlattening_1(src.nativeObj, mask.nativeObj, dst.nativeObj, low_threshold, high_threshold);
    }

    /**
     * By retaining only the gradients at edge locations, before integrating with the Poisson solver, one
     * washes out the texture of the selected region, giving its contents a flat aspect. Here Canny Edge %Detector is used.
     *
     * @param src Input 8-bit 3-channel image.
     * @param mask Input 8-bit 1 or 3-channel image.
     * @param dst Output image with the same size and type as src.
     * @param low_threshold %Range from 0 to 100.
     * high_threshold defaults to 45 and the Sobel kernel size to 3.
     *
     * <b>Note:</b>
     * The algorithm assumes that the color of the source image is close to that of the destination. This
     * assumption means that when the colors don't match, the source image color gets tinted toward the
     * color of the destination image.
     */
    public static void textureFlattening(Mat src, Mat mask, Mat dst, float low_threshold) {
        textureFlattening_2(src.nativeObj, mask.nativeObj, dst.nativeObj, low_threshold);
    }

    /**
     * By retaining only the gradients at edge locations, before integrating with the Poisson solver, one
     * washes out the texture of the selected region, giving its contents a flat aspect. Here Canny Edge %Detector is used.
     *
     * @param src Input 8-bit 3-channel image.
     * @param mask Input 8-bit 1 or 3-channel image.
     * @param dst Output image with the same size and type as src.
     * low_threshold defaults to 30, high_threshold to 45 and the Sobel kernel size to 3.
     *
     * <b>Note:</b>
     * The algorithm assumes that the color of the source image is close to that of the destination. This
     * assumption means that when the colors don't match, the source image color gets tinted toward the
     * color of the destination image.
     */
    public static void textureFlattening(Mat src, Mat mask, Mat dst) {
        textureFlattening_3(src.nativeObj, mask.nativeObj, dst.nativeObj);
    }


    //
    // C++:  void cv::edgePreservingFilter(Mat src, Mat& dst, int flags = 1, float sigma_s = 60, float sigma_r = 0.4f)
    //

    /**
     * Filtering is the fundamental operation in image and video processing. Edge-preserving smoothing
     * filters are used in many different applications CITE: EM11 .
     *
     * @param src Input 8-bit 3-channel image.
     * @param dst Output 8-bit 3-channel image.
     * @param flags Edge preserving filters: cv::RECURS_FILTER or cv::NORMCONV_FILTER
     * @param sigma_s %Range between 0 to 200.
     * @param sigma_r %Range between 0 to 1.
     */
    public static void edgePreservingFilter(Mat src, Mat dst, int flags, float sigma_s, float sigma_r) {
        edgePreservingFilter_0(src.nativeObj, dst.nativeObj, flags, sigma_s, sigma_r);
    }

    /**
     * Filtering is the fundamental operation in image and video processing. Edge-preserving smoothing
     * filters are used in many different applications CITE: EM11 .
     *
     * @param src Input 8-bit 3-channel image.
     * @param dst Output 8-bit 3-channel image.
     * @param flags Edge preserving filters: cv::RECURS_FILTER or cv::NORMCONV_FILTER
     * @param sigma_s %Range between 0 to 200.
     * The sigma_r parameter defaults to 0.4f.
     */
    public static void edgePreservingFilter(Mat src, Mat dst, int flags, float sigma_s) {
        edgePreservingFilter_1(src.nativeObj, dst.nativeObj, flags, sigma_s);
    }

    /**
     * Filtering is the fundamental operation in image and video processing. Edge-preserving smoothing
     * filters are used in many different applications CITE: EM11 .
     *
     * @param src Input 8-bit 3-channel image.
     * @param dst Output 8-bit 3-channel image.
     * @param flags Edge preserving filters: cv::RECURS_FILTER or cv::NORMCONV_FILTER
     * sigma_s defaults to 60 and sigma_r to 0.4f.
     */
    public static void edgePreservingFilter(Mat src, Mat dst, int flags) {
        edgePreservingFilter_2(src.nativeObj, dst.nativeObj, flags);
    }

    /**
     * Filtering is the fundamental operation in image and video processing. Edge-preserving smoothing
     * filters are used in many different applications CITE: EM11 .
     *
     * @param src Input 8-bit 3-channel image.
     * @param dst Output 8-bit 3-channel image.
     * flags defaults to 1, sigma_s to 60 and sigma_r to 0.4f.
     */
    public static void edgePreservingFilter(Mat src, Mat dst) {
        edgePreservingFilter_3(src.nativeObj, dst.nativeObj);
    }


    //
    // C++:  void cv::detailEnhance(Mat src, Mat& dst, float sigma_s = 10, float sigma_r = 0.15f)
    //

    /**
     * This filter enhances the details of a particular image.
     *
     * @param src Input 8-bit 3-channel image.
     * @param dst Output image with the same size and type as src.
     * @param sigma_s %Range between 0 to 200.
     * @param sigma_r %Range between 0 to 1.
     */
    public static void detailEnhance(Mat src, Mat dst, float sigma_s, float sigma_r) {
        detailEnhance_0(src.nativeObj, dst.nativeObj, sigma_s, sigma_r);
    }

    /**
     * This filter enhances the details of a particular image.
     *
     * @param src Input 8-bit 3-channel image.
     * @param dst Output image with the same size and type as src.
     * @param sigma_s %Range between 0 to 200.
     * The sigma_r parameter defaults to 0.15f.
     */
    public static void detailEnhance(Mat src, Mat dst, float sigma_s) {
        detailEnhance_1(src.nativeObj, dst.nativeObj, sigma_s);
    }

    /**
     * This filter enhances the details of a particular image.
     *
     * @param src Input 8-bit 3-channel image.
     * @param dst Output image with the same size and type as src.
     * sigma_s defaults to 10 and sigma_r to 0.15f.
     */
    public static void detailEnhance(Mat src, Mat dst) {
        detailEnhance_2(src.nativeObj, dst.nativeObj);
    }


    //
    // C++:  void cv::pencilSketch(Mat src, Mat& dst1, Mat& dst2, float sigma_s = 60, float sigma_r = 0.07f, float shade_factor = 0.02f)
    //

    /**
     * Pencil-like non-photorealistic line drawing
     *
     * @param src Input 8-bit 3-channel image.
     * @param dst1 Output 8-bit 1-channel image.
     * @param dst2 Output image with the same size and type as src.
     * @param sigma_s %Range between 0 to 200.
     * @param sigma_r %Range between 0 to 1.
     * @param shade_factor %Range between 0 to 0.1.
     */
    public static void pencilSketch(Mat src, Mat dst1, Mat dst2, float sigma_s, float sigma_r, float shade_factor) {
        pencilSketch_0(src.nativeObj, dst1.nativeObj, dst2.nativeObj, sigma_s, sigma_r, shade_factor);
    }

    /**
     * Pencil-like non-photorealistic line drawing
     *
     * Uses the underlying C++ default for {@code shade_factor}.
     *
     * @param src Input 8-bit 3-channel image.
     * @param dst1 Output 8-bit 1-channel image.
     * @param dst2 Output image with the same size and type as src.
     * @param sigma_s Range between 0 to 200.
     * @param sigma_r Range between 0 to 1.
     */
    public static void pencilSketch(Mat src, Mat dst1, Mat dst2, float sigma_s, float sigma_r) {
        long srcHandle = src.nativeObj;
        long grayHandle = dst1.nativeObj;
        long colorHandle = dst2.nativeObj;
        pencilSketch_1(srcHandle, grayHandle, colorHandle, sigma_s, sigma_r);
    }

    /**
     * Pencil-like non-photorealistic line drawing
     *
     * Uses the underlying C++ defaults for {@code sigma_r} and {@code shade_factor}.
     *
     * @param src Input 8-bit 3-channel image.
     * @param dst1 Output 8-bit 1-channel image.
     * @param dst2 Output image with the same size and type as src.
     * @param sigma_s Range between 0 to 200.
     */
    public static void pencilSketch(Mat src, Mat dst1, Mat dst2, float sigma_s) {
        long srcHandle = src.nativeObj;
        long grayHandle = dst1.nativeObj;
        long colorHandle = dst2.nativeObj;
        pencilSketch_2(srcHandle, grayHandle, colorHandle, sigma_s);
    }

    /**
     * Pencil-like non-photorealistic line drawing
     *
     * Uses the underlying C++ defaults for all tuning parameters.
     *
     * @param src Input 8-bit 3-channel image.
     * @param dst1 Output 8-bit 1-channel image.
     * @param dst2 Output image with the same size and type as src.
     */
    public static void pencilSketch(Mat src, Mat dst1, Mat dst2) {
        long srcHandle = src.nativeObj;
        long grayHandle = dst1.nativeObj;
        long colorHandle = dst2.nativeObj;
        pencilSketch_3(srcHandle, grayHandle, colorHandle);
    }


    //
    // C++:  void cv::stylization(Mat src, Mat& dst, float sigma_s = 60, float sigma_r = 0.45f)
    //

    /**
     * Stylization aims to produce digital imagery with a wide variety of effects not focused on
     * photorealism. Edge-aware filters are ideal for stylization, as they can abstract regions of low
     * contrast while preserving, or enhancing, high-contrast features.
     *
     * @param src Input 8-bit 3-channel image.
     * @param dst Output image with the same size and type as src.
     * @param sigma_s Range between 0 to 200.
     * @param sigma_r Range between 0 to 1.
     */
    public static void stylization(Mat src, Mat dst, float sigma_s, float sigma_r) {
        long srcHandle = src.nativeObj;
        long dstHandle = dst.nativeObj;
        stylization_0(srcHandle, dstHandle, sigma_s, sigma_r);
    }

    /**
     * Stylization aims to produce digital imagery with a wide variety of effects not focused on
     * photorealism. Edge-aware filters are ideal for stylization, as they can abstract regions of low
     * contrast while preserving, or enhancing, high-contrast features.
     *
     * Uses the underlying C++ default for {@code sigma_r}.
     *
     * @param src Input 8-bit 3-channel image.
     * @param dst Output image with the same size and type as src.
     * @param sigma_s Range between 0 to 200.
     */
    public static void stylization(Mat src, Mat dst, float sigma_s) {
        long srcHandle = src.nativeObj;
        long dstHandle = dst.nativeObj;
        stylization_1(srcHandle, dstHandle, sigma_s);
    }

    /**
     * Stylization aims to produce digital imagery with a wide variety of effects not focused on
     * photorealism. Edge-aware filters are ideal for stylization, as they can abstract regions of low
     * contrast while preserving, or enhancing, high-contrast features.
     *
     * Uses the underlying C++ defaults for both sigma parameters.
     *
     * @param src Input 8-bit 3-channel image.
     * @param dst Output image with the same size and type as src.
     */
    public static void stylization(Mat src, Mat dst) {
        long srcHandle = src.nativeObj;
        long dstHandle = dst.nativeObj;
        stylization_2(srcHandle, dstHandle);
    }


    //
    // C++:  void cv::cuda::nonLocalMeans(GpuMat src, GpuMat& dst, float h, int search_window = 21, int block_size = 7, int borderMode = BORDER_DEFAULT, Stream stream = Stream::Null())
    //

    // Unknown type 'GpuMat' (I), skipping the function


    //
    // C++:  void cv::cuda::fastNlMeansDenoising(GpuMat src, GpuMat& dst, float h, int search_window = 21, int block_size = 7, Stream stream = Stream::Null())
    //

    // Unknown type 'GpuMat' (I), skipping the function


    //
    // C++:  void cv::cuda::fastNlMeansDenoisingColored(GpuMat src, GpuMat& dst, float h_luminance, float photo_render, int search_window = 21, int block_size = 7, Stream stream = Stream::Null())
    //

    // Unknown type 'GpuMat' (I), skipping the function




    // -----------------------------------------------------------------------
    // Native (JNI) stub declarations.
    //
    // Each group mirrors one wrapped C++ function; the numeric suffix
    // (_0, _1, ...) selects which trailing default arguments are omitted,
    // matching the Java overloads above. The "// C++:" comment above each
    // group records the original C++ prototype, including default values.
    // Mat/GpuMat arguments are passed as raw native object handles (long).
    // These names and signatures are bound to the compiled OpenCV native
    // library and must not be changed.
    // -----------------------------------------------------------------------

    // C++:  void cv::inpaint(Mat src, Mat inpaintMask, Mat& dst, double inpaintRadius, int flags)
    private static native void inpaint_0(long src_nativeObj, long inpaintMask_nativeObj, long dst_nativeObj, double inpaintRadius, int flags);

    // C++:  void cv::fastNlMeansDenoising(Mat src, Mat& dst, float h = 3, int templateWindowSize = 7, int searchWindowSize = 21)
    private static native void fastNlMeansDenoising_0(long src_nativeObj, long dst_nativeObj, float h, int templateWindowSize, int searchWindowSize);
    private static native void fastNlMeansDenoising_1(long src_nativeObj, long dst_nativeObj, float h, int templateWindowSize);
    private static native void fastNlMeansDenoising_2(long src_nativeObj, long dst_nativeObj, float h);
    private static native void fastNlMeansDenoising_3(long src_nativeObj, long dst_nativeObj);

    // C++:  void cv::fastNlMeansDenoising(Mat src, Mat& dst, vector_float h, int templateWindowSize = 7, int searchWindowSize = 21, int normType = NORM_L2)
    private static native void fastNlMeansDenoising_4(long src_nativeObj, long dst_nativeObj, long h_mat_nativeObj, int templateWindowSize, int searchWindowSize, int normType);
    private static native void fastNlMeansDenoising_5(long src_nativeObj, long dst_nativeObj, long h_mat_nativeObj, int templateWindowSize, int searchWindowSize);
    private static native void fastNlMeansDenoising_6(long src_nativeObj, long dst_nativeObj, long h_mat_nativeObj, int templateWindowSize);
    private static native void fastNlMeansDenoising_7(long src_nativeObj, long dst_nativeObj, long h_mat_nativeObj);

    // C++:  void cv::fastNlMeansDenoisingColored(Mat src, Mat& dst, float h = 3, float hColor = 3, int templateWindowSize = 7, int searchWindowSize = 21)
    private static native void fastNlMeansDenoisingColored_0(long src_nativeObj, long dst_nativeObj, float h, float hColor, int templateWindowSize, int searchWindowSize);
    private static native void fastNlMeansDenoisingColored_1(long src_nativeObj, long dst_nativeObj, float h, float hColor, int templateWindowSize);
    private static native void fastNlMeansDenoisingColored_2(long src_nativeObj, long dst_nativeObj, float h, float hColor);
    private static native void fastNlMeansDenoisingColored_3(long src_nativeObj, long dst_nativeObj, float h);
    private static native void fastNlMeansDenoisingColored_4(long src_nativeObj, long dst_nativeObj);

    // C++:  void cv::fastNlMeansDenoisingMulti(vector_Mat srcImgs, Mat& dst, int imgToDenoiseIndex, int temporalWindowSize, float h = 3, int templateWindowSize = 7, int searchWindowSize = 21)
    private static native void fastNlMeansDenoisingMulti_0(long srcImgs_mat_nativeObj, long dst_nativeObj, int imgToDenoiseIndex, int temporalWindowSize, float h, int templateWindowSize, int searchWindowSize);
    private static native void fastNlMeansDenoisingMulti_1(long srcImgs_mat_nativeObj, long dst_nativeObj, int imgToDenoiseIndex, int temporalWindowSize, float h, int templateWindowSize);
    private static native void fastNlMeansDenoisingMulti_2(long srcImgs_mat_nativeObj, long dst_nativeObj, int imgToDenoiseIndex, int temporalWindowSize, float h);
    private static native void fastNlMeansDenoisingMulti_3(long srcImgs_mat_nativeObj, long dst_nativeObj, int imgToDenoiseIndex, int temporalWindowSize);

    // C++:  void cv::fastNlMeansDenoisingMulti(vector_Mat srcImgs, Mat& dst, int imgToDenoiseIndex, int temporalWindowSize, vector_float h, int templateWindowSize = 7, int searchWindowSize = 21, int normType = NORM_L2)
    private static native void fastNlMeansDenoisingMulti_4(long srcImgs_mat_nativeObj, long dst_nativeObj, int imgToDenoiseIndex, int temporalWindowSize, long h_mat_nativeObj, int templateWindowSize, int searchWindowSize, int normType);
    private static native void fastNlMeansDenoisingMulti_5(long srcImgs_mat_nativeObj, long dst_nativeObj, int imgToDenoiseIndex, int temporalWindowSize, long h_mat_nativeObj, int templateWindowSize, int searchWindowSize);
    private static native void fastNlMeansDenoisingMulti_6(long srcImgs_mat_nativeObj, long dst_nativeObj, int imgToDenoiseIndex, int temporalWindowSize, long h_mat_nativeObj, int templateWindowSize);
    private static native void fastNlMeansDenoisingMulti_7(long srcImgs_mat_nativeObj, long dst_nativeObj, int imgToDenoiseIndex, int temporalWindowSize, long h_mat_nativeObj);

    // C++:  void cv::fastNlMeansDenoisingColoredMulti(vector_Mat srcImgs, Mat& dst, int imgToDenoiseIndex, int temporalWindowSize, float h = 3, float hColor = 3, int templateWindowSize = 7, int searchWindowSize = 21)
    private static native void fastNlMeansDenoisingColoredMulti_0(long srcImgs_mat_nativeObj, long dst_nativeObj, int imgToDenoiseIndex, int temporalWindowSize, float h, float hColor, int templateWindowSize, int searchWindowSize);
    private static native void fastNlMeansDenoisingColoredMulti_1(long srcImgs_mat_nativeObj, long dst_nativeObj, int imgToDenoiseIndex, int temporalWindowSize, float h, float hColor, int templateWindowSize);
    private static native void fastNlMeansDenoisingColoredMulti_2(long srcImgs_mat_nativeObj, long dst_nativeObj, int imgToDenoiseIndex, int temporalWindowSize, float h, float hColor);
    private static native void fastNlMeansDenoisingColoredMulti_3(long srcImgs_mat_nativeObj, long dst_nativeObj, int imgToDenoiseIndex, int temporalWindowSize, float h);
    private static native void fastNlMeansDenoisingColoredMulti_4(long srcImgs_mat_nativeObj, long dst_nativeObj, int imgToDenoiseIndex, int temporalWindowSize);

    // C++:  void cv::denoise_TVL1(vector_Mat observations, Mat result, double lambda = 1.0, int niters = 30)
    private static native void denoise_TVL1_0(long observations_mat_nativeObj, long result_nativeObj, double lambda, int niters);
    private static native void denoise_TVL1_1(long observations_mat_nativeObj, long result_nativeObj, double lambda);
    private static native void denoise_TVL1_2(long observations_mat_nativeObj, long result_nativeObj);

    // Factory functions below return the raw native handle of a Ptr<...>
    // smart pointer, which the Java side wraps in the matching class.

    // C++:  Ptr_Tonemap cv::createTonemap(float gamma = 1.0f)
    private static native long createTonemap_0(float gamma);
    private static native long createTonemap_1();

    // C++:  Ptr_TonemapDrago cv::createTonemapDrago(float gamma = 1.0f, float saturation = 1.0f, float bias = 0.85f)
    private static native long createTonemapDrago_0(float gamma, float saturation, float bias);
    private static native long createTonemapDrago_1(float gamma, float saturation);
    private static native long createTonemapDrago_2(float gamma);
    private static native long createTonemapDrago_3();

    // C++:  Ptr_TonemapReinhard cv::createTonemapReinhard(float gamma = 1.0f, float intensity = 0.0f, float light_adapt = 1.0f, float color_adapt = 0.0f)
    private static native long createTonemapReinhard_0(float gamma, float intensity, float light_adapt, float color_adapt);
    private static native long createTonemapReinhard_1(float gamma, float intensity, float light_adapt);
    private static native long createTonemapReinhard_2(float gamma, float intensity);
    private static native long createTonemapReinhard_3(float gamma);
    private static native long createTonemapReinhard_4();

    // C++:  Ptr_TonemapMantiuk cv::createTonemapMantiuk(float gamma = 1.0f, float scale = 0.7f, float saturation = 1.0f)
    private static native long createTonemapMantiuk_0(float gamma, float scale, float saturation);
    private static native long createTonemapMantiuk_1(float gamma, float scale);
    private static native long createTonemapMantiuk_2(float gamma);
    private static native long createTonemapMantiuk_3();

    // C++:  Ptr_AlignMTB cv::createAlignMTB(int max_bits = 6, int exclude_range = 4, bool cut = true)
    private static native long createAlignMTB_0(int max_bits, int exclude_range, boolean cut);
    private static native long createAlignMTB_1(int max_bits, int exclude_range);
    private static native long createAlignMTB_2(int max_bits);
    private static native long createAlignMTB_3();

    // C++:  Ptr_CalibrateDebevec cv::createCalibrateDebevec(int samples = 70, float lambda = 10.0f, bool random = false)
    private static native long createCalibrateDebevec_0(int samples, float lambda, boolean random);
    private static native long createCalibrateDebevec_1(int samples, float lambda);
    private static native long createCalibrateDebevec_2(int samples);
    private static native long createCalibrateDebevec_3();

    // C++:  Ptr_CalibrateRobertson cv::createCalibrateRobertson(int max_iter = 30, float threshold = 0.01f)
    private static native long createCalibrateRobertson_0(int max_iter, float threshold);
    private static native long createCalibrateRobertson_1(int max_iter);
    private static native long createCalibrateRobertson_2();

    // C++:  Ptr_MergeDebevec cv::createMergeDebevec()
    private static native long createMergeDebevec_0();

    // C++:  Ptr_MergeMertens cv::createMergeMertens(float contrast_weight = 1.0f, float saturation_weight = 1.0f, float exposure_weight = 0.0f)
    private static native long createMergeMertens_0(float contrast_weight, float saturation_weight, float exposure_weight);
    private static native long createMergeMertens_1(float contrast_weight, float saturation_weight);
    private static native long createMergeMertens_2(float contrast_weight);
    private static native long createMergeMertens_3();

    // C++:  Ptr_MergeRobertson cv::createMergeRobertson()
    private static native long createMergeRobertson_0();

    // C++:  void cv::decolor(Mat src, Mat& grayscale, Mat& color_boost)
    private static native void decolor_0(long src_nativeObj, long grayscale_nativeObj, long color_boost_nativeObj);

    // C++:  void cv::seamlessClone(Mat src, Mat dst, Mat mask, Point p, Mat& blend, int flags)
    // NOTE: the Point argument is flattened into its (x, y) coordinates.
    private static native void seamlessClone_0(long src_nativeObj, long dst_nativeObj, long mask_nativeObj, double p_x, double p_y, long blend_nativeObj, int flags);

    // C++:  void cv::colorChange(Mat src, Mat mask, Mat& dst, float red_mul = 1.0f, float green_mul = 1.0f, float blue_mul = 1.0f)
    private static native void colorChange_0(long src_nativeObj, long mask_nativeObj, long dst_nativeObj, float red_mul, float green_mul, float blue_mul);
    private static native void colorChange_1(long src_nativeObj, long mask_nativeObj, long dst_nativeObj, float red_mul, float green_mul);
    private static native void colorChange_2(long src_nativeObj, long mask_nativeObj, long dst_nativeObj, float red_mul);
    private static native void colorChange_3(long src_nativeObj, long mask_nativeObj, long dst_nativeObj);

    // C++:  void cv::illuminationChange(Mat src, Mat mask, Mat& dst, float alpha = 0.2f, float beta = 0.4f)
    private static native void illuminationChange_0(long src_nativeObj, long mask_nativeObj, long dst_nativeObj, float alpha, float beta);
    private static native void illuminationChange_1(long src_nativeObj, long mask_nativeObj, long dst_nativeObj, float alpha);
    private static native void illuminationChange_2(long src_nativeObj, long mask_nativeObj, long dst_nativeObj);

    // C++:  void cv::textureFlattening(Mat src, Mat mask, Mat& dst, float low_threshold = 30, float high_threshold = 45, int kernel_size = 3)
    private static native void textureFlattening_0(long src_nativeObj, long mask_nativeObj, long dst_nativeObj, float low_threshold, float high_threshold, int kernel_size);
    private static native void textureFlattening_1(long src_nativeObj, long mask_nativeObj, long dst_nativeObj, float low_threshold, float high_threshold);
    private static native void textureFlattening_2(long src_nativeObj, long mask_nativeObj, long dst_nativeObj, float low_threshold);
    private static native void textureFlattening_3(long src_nativeObj, long mask_nativeObj, long dst_nativeObj);

    // C++:  void cv::edgePreservingFilter(Mat src, Mat& dst, int flags = 1, float sigma_s = 60, float sigma_r = 0.4f)
    private static native void edgePreservingFilter_0(long src_nativeObj, long dst_nativeObj, int flags, float sigma_s, float sigma_r);
    private static native void edgePreservingFilter_1(long src_nativeObj, long dst_nativeObj, int flags, float sigma_s);
    private static native void edgePreservingFilter_2(long src_nativeObj, long dst_nativeObj, int flags);
    private static native void edgePreservingFilter_3(long src_nativeObj, long dst_nativeObj);

    // C++:  void cv::detailEnhance(Mat src, Mat& dst, float sigma_s = 10, float sigma_r = 0.15f)
    private static native void detailEnhance_0(long src_nativeObj, long dst_nativeObj, float sigma_s, float sigma_r);
    private static native void detailEnhance_1(long src_nativeObj, long dst_nativeObj, float sigma_s);
    private static native void detailEnhance_2(long src_nativeObj, long dst_nativeObj);

    // C++:  void cv::pencilSketch(Mat src, Mat& dst1, Mat& dst2, float sigma_s = 60, float sigma_r = 0.07f, float shade_factor = 0.02f)
    private static native void pencilSketch_0(long src_nativeObj, long dst1_nativeObj, long dst2_nativeObj, float sigma_s, float sigma_r, float shade_factor);
    private static native void pencilSketch_1(long src_nativeObj, long dst1_nativeObj, long dst2_nativeObj, float sigma_s, float sigma_r);
    private static native void pencilSketch_2(long src_nativeObj, long dst1_nativeObj, long dst2_nativeObj, float sigma_s);
    private static native void pencilSketch_3(long src_nativeObj, long dst1_nativeObj, long dst2_nativeObj);

    // C++:  void cv::stylization(Mat src, Mat& dst, float sigma_s = 60, float sigma_r = 0.45f)
    private static native void stylization_0(long src_nativeObj, long dst_nativeObj, float sigma_s, float sigma_r);
    private static native void stylization_1(long src_nativeObj, long dst_nativeObj, float sigma_s);
    private static native void stylization_2(long src_nativeObj, long dst_nativeObj);
}