diff --git a/README.md b/README.md
index 19900f766..f3609e516 100644
--- a/README.md
+++ b/README.md
@@ -18,6 +18,7 @@
- [Building The Project](#building-the-project)
- [Installing The Game](#installing-the-game)
- [Running The Game](#running-the-game)
+- [Third-Party Libraries](#third-party-libraries)
- [Anti-virus Warning](#anti-virus-warning)
- [Legal](#legal)
- [License](#license)
@@ -66,6 +67,11 @@ You can download the v2.03 patch in English, French, German and Spanish.
# Running The Game
Copy the built executables from the build directory to the Tiberian Sun directory. Run `LaunchVinifera.exe` to start the game with the Vinifera project applied. For more information on how to use Vinifera, please read the documention or you can join the **C&C Modding Haven** [Discord server]() and use the **#vinifera-chat** channel.
+# Third-Party Libraries
+Vinifera makes use of third-party libraries to help implement features. Below is a list of the libraries used by the project:
+ - [LodePNG](https://lodev.org/lodepng/)
+ - [Image-Resampler](https://github.com/ramenhut/image-resampler)
+
# Anti-virus Warning
Anti-virus software like Windows Defender could mark the binaries built from the DLL configuration in this project as a virus. We would like to assure that this is a false-positive and that these is completely safe to use. If you are still unsure about running these binaries on your system, your are welcome to join our Discord server where one of the developers can explain the process used by this project in detail.
diff --git a/src/libs/image-resampler/Base/vnBase.h b/src/libs/image-resampler/Base/vnBase.h
new file mode 100644
index 000000000..efd94a7b4
--- /dev/null
+++ b/src/libs/image-resampler/Base/vnBase.h
@@ -0,0 +1,231 @@
+
+// Copyright (c) 1998-2009 Joe Bertolami. All Right Reserved.
+//
+// vnBase.h
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Additional Information:
+//
+// For more information, visit http://www.bertolami.com.
+
+#ifndef __VN_BASE_H__
+#define __VN_BASE_H__
+
+/**********************************************************************************
+//
+// Platform definitions
+//
+**********************************************************************************/
+
+#if defined ( _WIN32 ) || defined ( _WIN64 )
+ #include "windows.h"
+ #pragma warning (disable : 4244) // conversion, possible loss of data
+ #pragma warning (disable : 4018) // signed / unsigned mismatch
+ #pragma warning (disable : 4996) // deprecated interfaces
+ #pragma warning (disable : 4221) // empty translation unit
+ #pragma warning (disable : 4273) // inconsistent linkage
+
+ #define VN_PLATFORM_WINDOWS // building a Windows application
+
+#elif defined ( __APPLE__ )
+ #include "TargetConditionals.h"
+ #include "unistd.h"
+ #include "sys/types.h"
+ #include "ctype.h"
+
+ #if TARGET_OS_IPHONE || TARGET_IPHONE_SIMULATOR
+ #define VN_PLATFORM_IOS // building an application for iOS
+ #elif TARGET_OS_MAC
+ #define VN_PLATFORM_MACOSX // building a Mac OSX application
+ #endif
+#else
+ #error "Unsupported target platform detected."
+#endif
+
+/**********************************************************************************
+//
+// Debug definitions
+//
+**********************************************************************************/
+
+#if defined ( VN_PLATFORM_WINDOWS )
+ #ifdef _DEBUG
+ #define VN_DEBUG _DEBUG
+ #elif defined ( DEBUG )
+ #define VN_DEBUG DEBUG
+ #endif
+ #if defined( VN_DEBUG ) && !defined( debug_break )
+ #define debug_break __debugbreak
+ #endif
+ #define __VN_FUNCTION__ __FUNCTION__
+#elif defined ( VN_PLATFORM_IOS ) || defined ( VN_PLATFORM_MACOSX )
+ #ifdef DEBUG
+ #define VN_DEBUG DEBUG
+ #if !defined( debug_break )
+ #define debug_break() __builtin_trap()
+ #endif
+ #endif
+ #define __VN_FUNCTION__ __func__
+#endif
+
+/**********************************************************************************
+//
+// Standard headers
+//
+**********************************************************************************/
+
+#include "stdio.h"
+#include "stdlib.h"
+#include "stdarg.h"
+#include "string.h"
+#include "math.h"
+
+/**********************************************************************************
+//
+// Standard types
+//
+**********************************************************************************/
+
+#if defined ( VN_PLATFORM_WINDOWS )
+ // Types are already defined by the platform.
+#elif defined ( VN_PLATFORM_IOS ) || defined ( VN_PLATFORM_MACOSX )
+ typedef int64_t INT64;
+ typedef int32_t INT32;
+ typedef int16_t INT16;
+ typedef int8_t INT8;
+
+ typedef u_int64_t UINT64;
+ typedef u_int32_t UINT32;
+ typedef u_int16_t UINT16;
+ typedef u_int8_t UINT8;
+#endif
+
+typedef float FLOAT32;
+typedef double FLOAT64;
+typedef wchar_t WCHAR;
+
+#define VN_KB ( (UINT32) 1024 )
+#define VN_MB ( VN_KB * VN_KB )
+#define VN_GB ( VN_MB * VN_KB )
+
+#define CONST const
+#ifndef CHAR
+#define CHAR char
+#endif
+
+/**********************************************************************************
+//
+// Status codes
+//
+**********************************************************************************/
+
+typedef UINT8 VN_STATUS;
+
+#define VN_SUCCESS (0)
+#define VN_ERROR_INVALIDARG (1)
+#define VN_ERROR_NOTIMPL (2)
+#define VN_ERROR_OUTOFMEMORY (3)
+#define VN_ERROR_UNDEFINED (4)
+#define VN_ERROR_HARDWAREFAIL (5)
+#define VN_ERROR_INVALID_INDEX (6)
+#define VN_ERROR_CAPACITY_LIMIT (7)
+#define VN_ERROR_INVALID_RESOURCE (8)
+#define VN_ERROR_OPERATION_TIMEDOUT (9)
+#define VN_ERROR_EXECUTION_FAILURE (10)
+#define VN_ERROR_PERMISSION_DENIED (11)
+#define VN_ERROR_IO_FAILURE (12)
+#define VN_ERROR_RESOURCE_UNREACHABLE (13)
+#define VN_ERROR_SYSTEM_FAILURE (14)
+#define VN_ERROR_NOT_READY (15)
+#define VN_ERROR_OPERATION_COMPLETED (16)
+#define VN_ERROR_RESOURCE_UNUSED (17)
+
+#define VN_SUCCEEDED( code ) ( (code) == VN_SUCCESS )
+#define VN_FAILED( code ) ( !VN_SUCCEEDED( code ) )
+
+/**********************************************************************************
+//
+// Debug support
+//
+**********************************************************************************/
+
+#ifdef VN_DEBUG
+ #define VN_PARAM_CHECK (1)
+ #define VN_ERR(fmt, ...) do { printf("[VN-ERR] "); \
+ printf(fmt, ##__VA_ARGS__); \
+ printf("\n"); debug_break(); \
+ } while(0)
+
+ #define VN_MSG(fmt, ...) do { printf("[VN-MSG] "); \
+ printf(fmt, ##__VA_ARGS__); \
+ printf("\n"); \
+ } while(0)
+#else
+ #define VN_PARAM_CHECK (0)
+ #define VN_ERR(fmt, ...)
+ #define VN_MSG(fmt, ...) do { printf("[VN-MSG] "); \
+ printf(fmt, ##__VA_ARGS__); \
+ printf("\n"); \
+ } while(0)
+#endif
+
+#define VN_ERROR_CREATE_STRING(x) ((char *) #x)
+#define vnPostError(x) vnPostErrorInternal(x, VN_ERROR_CREATE_STRING(x), __VN_FUNCTION__, (char *) __FILE__, __LINE__)
+inline UINT32 vnPostErrorInternal(UINT8 error, CONST CHAR *error_string, CONST CHAR *function, CONST CHAR *filename, UINT32 line)
+{
+#ifdef VN_DEBUG
+ CONST CHAR *path = filename;
+
+ for (INT32 i = (INT32) strlen(filename); i >= 0; --i)
+ {
+ if (filename[ i ] == '/')
+ break;
+
+ path = &filename[i];
+ }
+
+ VN_ERR("*** RIP *** %s @ %s in %s:%i", error_string, function, path, line);
+#endif
+ return error;
+}
+
+/**********************************************************************************
+//
+// Standard helpers
+//
+**********************************************************************************/
+
+#define VN_DISABLE_COPY_AND_ASSIGN(type) \
+ type(const type &rvalue); \
+ type &operator = (const type &rvalue);
+
+#define VN_TEMPLATE_T template
+#define VN_TEMPLATE_SPEC template <>
+
+#define VN_VARG(fmt) va_list argptr; \
+ CHAR text[1*VN_KB] = {0}; \
+ va_start(argptr, fmt); \
+ vsprintf(text, fmt, argptr); \
+ va_end(argptr);
+
+#endif // __VN_BASE_H__
\ No newline at end of file
diff --git a/src/libs/image-resampler/Base/vnHalf.cpp b/src/libs/image-resampler/Base/vnHalf.cpp
new file mode 100644
index 000000000..c57ece38f
--- /dev/null
+++ b/src/libs/image-resampler/Base/vnHalf.cpp
@@ -0,0 +1,262 @@
+
+#include "vnHalf.h"
+
+FLOAT16::FLOAT16() : m_uiFormat(0) {}
+
+FLOAT16::FLOAT16( CONST FLOAT16 & rhs ) : m_uiFormat( rhs.m_uiFormat ) {}
+
+FLOAT16::FLOAT16( CONST FLOAT32 & rhs )
+{
+ (*this) = rhs;
+}
+
+FLOAT16::~FLOAT16() {}
+
+FLOAT16 & FLOAT16::operator = ( CONST FLOAT16 & rhs )
+{
+ m_uiFormat = rhs.m_uiFormat;
+
+ return (*this);
+}
+
+FLOAT16 & FLOAT16::operator = ( CONST FLOAT32 & rhs )
+{
+ (*this) = ToFloat16( rhs );
+
+ return (*this);
+}
+
+BOOL FLOAT16::operator == ( CONST FLOAT16 & rhs )
+{
+ return m_uiFormat == rhs.m_uiFormat;
+}
+
+BOOL FLOAT16::operator != ( CONST FLOAT16 & rhs )
+{
+ return !( (*this) == rhs );
+}
+
+FLOAT32 FLOAT16::ToFloat32( FLOAT16 rhs )
+{
+ FLOAT32 fOutput = 0; // floating point result
+ UINT32 * uiOutput = (UINT32 *) &fOutput; // bit manipulated output
+
+ if ( 0 == rhs.m_uiFormat ) return 0.0f; // +zero
+ else if ( 0x8000 == rhs.m_uiFormat ) return -0.0f; // -zero
+
+ UINT32 uiHalfSignBit = GET_HALF_SIGN_BIT( rhs.m_uiFormat );
+ UINT32 uiHalfMantBits = GET_HALF_MANT_BITS( rhs.m_uiFormat ) << 13;
+ INT32 iHalfExpBits = GET_HALF_EXP_BITS( rhs.m_uiFormat );
+
+ //
+ // Next we check for additional special cases:
+ //
+
+ if ( 0 == iHalfExpBits )
+ {
+ //
+ // Denormalized values
+ //
+
+ SET_SINGLE_SIGN_BIT( uiHalfSignBit, (*uiOutput) );
+ SET_SINGLE_EXP_BITS( 0, (*uiOutput) );
+ SET_SINGLE_MANT_BITS( uiHalfMantBits, (*uiOutput) );
+ }
+
+ else if ( 0x1F == iHalfExpBits )
+ {
+ if ( 0 == uiHalfMantBits )
+ {
+ //
+ // +- Infinity
+ //
+
+ (*uiOutput) = ( uiHalfSignBit ? SINGLE_NEG_INFINITY : SINGLE_POS_INFINITY );
+ }
+ else
+ {
+ //
+ // (S/Q)NaN
+ //
+
+ SET_SINGLE_SIGN_BIT( uiHalfSignBit, (*uiOutput) );
+ SET_SINGLE_EXP_BITS( 0xFF, (*uiOutput) );
+ SET_SINGLE_MANT_BITS( uiHalfMantBits, (*uiOutput) );
+ }
+ }
+
+ else
+ {
+ //
+ // Normalized values
+ //
+
+ SET_SINGLE_SIGN_BIT( uiHalfSignBit, (*uiOutput) );
+ SET_SINGLE_EXP_BITS( ( iHalfExpBits - 15 ) + 127, (*uiOutput) );
+ SET_SINGLE_MANT_BITS( uiHalfMantBits, (*uiOutput) );
+ }
+
+ //
+ // ATP: uiOutput equals the bit pattern of our floating point result.
+ //
+
+ return fOutput;
+}
+
+FLOAT16 FLOAT16::ToFloat16( FLOAT32 rhs )
+{
+ //
+ // (!) Truncation will occur for values outside the representable range for float16.
+ //
+
+ FLOAT16 fOutput;
+ UINT32 uiInput = *((UINT32 *) &rhs );
+
+ if ( 0.0f == rhs )
+ {
+ fOutput.m_uiFormat = 0;
+ return fOutput;
+ }
+
+ else if ( -0.0f == rhs )
+ {
+ fOutput.m_uiFormat = 0x8000;
+ return fOutput;
+ }
+
+ UINT32 uiSignBit = GET_SINGLE_SIGN_BIT( uiInput );
+ UINT32 uiMantBits = GET_SINGLE_MANT_BITS( uiInput ) >> 13;
+ INT32 iExpBits = GET_SINGLE_EXP_BITS( uiInput );
+
+ //
+ // Next we check for additional special cases:
+ //
+
+ if ( 0 == iExpBits )
+ {
+ //
+ // Denormalized values
+ //
+
+ SET_HALF_SIGN_BIT( uiSignBit, fOutput.m_uiFormat );
+ SET_HALF_EXP_BITS( 0, fOutput.m_uiFormat );
+ SET_HALF_MANT_BITS( uiMantBits, fOutput.m_uiFormat );
+ }
+
+ else if ( 0xFF == iExpBits )
+ {
+ if ( 0 == uiMantBits )
+ {
+ //
+ // +- Infinity
+ //
+
+ fOutput.m_uiFormat = ( uiSignBit ? HALF_NEG_INFINITY : HALF_POS_INFINITY );
+ }
+ else
+ {
+ //
+ // (S/Q)NaN
+ //
+
+ SET_HALF_SIGN_BIT( uiSignBit, fOutput.m_uiFormat );
+ SET_HALF_EXP_BITS( 0x1F, fOutput.m_uiFormat );
+ SET_HALF_MANT_BITS( uiMantBits, fOutput.m_uiFormat );
+ }
+ }
+
+ else
+ {
+ //
+ // Normalized values
+ //
+
+ INT32 iExponent = iExpBits - 127 + 15;
+
+ if ( iExponent < 0 ) { iExponent = 0; }
+ else if ( iExponent > 31 ) iExponent = 31;
+
+ SET_HALF_SIGN_BIT( uiSignBit, fOutput.m_uiFormat );
+ SET_HALF_EXP_BITS( iExponent, fOutput.m_uiFormat );
+ SET_HALF_MANT_BITS( uiMantBits, fOutput.m_uiFormat );
+ }
+
+ //
+ // ATP: uiOutput equals the bit pattern of our floating point result.
+ //
+
+ return fOutput;
+}
+
+
+FLOAT32 FLOAT16::ToFloat32Fast( FLOAT16 rhs )
+{
+ FLOAT32 fOutput = 0; // floating point result
+ UINT32 * uiOutput = (UINT32 *) &fOutput; // bit manipulated output
+
+ if ( 0 == rhs.m_uiFormat ) return 0.0f; // +zero
+ else if ( 0x8000 == rhs.m_uiFormat ) return -0.0f; // -zero
+
+ UINT32 uiHalfSignBit = GET_HALF_SIGN_BIT( rhs.m_uiFormat );
+ UINT32 uiHalfMantBits = GET_HALF_MANT_BITS( rhs.m_uiFormat ) << 13;
+ INT32 iHalfExpBits = GET_HALF_EXP_BITS( rhs.m_uiFormat );
+
+ //
+ // Normalized values
+ //
+
+ SET_SINGLE_SIGN_BIT( uiHalfSignBit, (*uiOutput) );
+ SET_SINGLE_EXP_BITS( ( iHalfExpBits - 15 ) + 127, (*uiOutput) );
+ SET_SINGLE_MANT_BITS( uiHalfMantBits, (*uiOutput) );
+
+ //
+ // ATP: uiOutput equals the bit pattern of our floating point result.
+ //
+
+ return fOutput;
+}
+
+FLOAT16 FLOAT16::ToFloat16Fast( FLOAT32 rhs )
+{
+ //
+ // (!) Truncation will occur for values outside the representable range for float16.
+ //
+
+ FLOAT16 fOutput;
+ UINT32 uiInput = *((UINT32 *) &rhs );
+
+ if ( 0.0f == rhs )
+ {
+ fOutput.m_uiFormat = 0;
+ return fOutput;
+ }
+
+ else if ( -0.0f == rhs )
+ {
+ fOutput.m_uiFormat = 0x8000;
+ return fOutput;
+ }
+
+ UINT32 uiSignBit = GET_SINGLE_SIGN_BIT( uiInput );
+ UINT32 uiMantBits = GET_SINGLE_MANT_BITS( uiInput ) >> 13;
+ INT32 iExpBits = GET_SINGLE_EXP_BITS( uiInput );
+
+ //
+ // Normalized values
+ //
+
+ INT32 iExponent = iExpBits - 127 + 15;
+
+ if ( iExponent < 0 ) { iExponent = 0; }
+ else if ( iExponent > 31 ) iExponent = 31;
+
+ SET_HALF_SIGN_BIT( uiSignBit, fOutput.m_uiFormat );
+ SET_HALF_EXP_BITS( iExponent, fOutput.m_uiFormat );
+ SET_HALF_MANT_BITS( uiMantBits, fOutput.m_uiFormat );
+
+ //
+ // ATP: uiOutput equals the bit pattern of our floating point result.
+ //
+
+ return fOutput;
+}
diff --git a/src/libs/image-resampler/Base/vnHalf.h b/src/libs/image-resampler/Base/vnHalf.h
new file mode 100644
index 000000000..f32f3a7ea
--- /dev/null
+++ b/src/libs/image-resampler/Base/vnHalf.h
@@ -0,0 +1,149 @@
+
+//
+// Copyright (c) 2002-2009 Joe Bertolami. All Right Reserved.
+//
+// vnHalf.h
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Description:
+//
+// IEEE 754 Floating Point Standard
+//
+// Float32: Float16:
+//
+// 1 bit sign 1 bit sign
+// 8 bit exponent 5 bit exponent
+// 23 bit mantissa 10 bit mantissa
+//
+// Bias of 127 Bias of 15
+//
+// Special Values:
+//
+// +-Zero: s, 0e, 0m
+// +-Denormalized: s, 0e, (1 -> max)m
+// +-Normalized: s, (1 -> [max-1])e, m
+// +-Infinity: s, (all 1)e, (all 0s)m
+// +-SNaN: s, (all 1)e, (1 -> [max-high_bit])m
+// +-QNaN: s, (all 1)e, (high_bit -> all 1s)m
+//
+// Additional Information:
+//
+// For more information, visit http://www.bertolami.com.
+
+#ifndef __VN_HALF_H__
+#define __VN_HALF_H__
+
+#include "vnBase.h"
+
+//
+// FLOAT16 Helpers
+//
+
+#define HALF_SIGN_SHIFT (15)
+#define HALF_EXP_SHIFT (10)
+#define HALF_MANT_SHIFT (0)
+
+#define HALF_SIGN_MASK (0x8000)
+#define HALF_EXP_MASK (0x7C00)
+#define HALF_MANT_MASK (0x03FF)
+
+#define HALF_POS_INFINITY (0x7C00)
+#define HALF_NEG_INFINITY (0xFC00)
+
+#define GET_HALF_SIGN_BIT(x) ((x) >> HALF_SIGN_SHIFT)
+#define GET_HALF_EXP_BITS(x) (((x) >> HALF_EXP_SHIFT) & 0x1F)
+#define GET_HALF_MANT_BITS(x) ((x) & HALF_MANT_MASK)
+
+#define SET_HALF_SIGN_BIT(x,dest) ((dest) = ((((x) << HALF_SIGN_SHIFT) & HALF_SIGN_MASK) | ( (dest) & ( HALF_EXP_MASK | HALF_MANT_MASK ))))
+#define SET_HALF_EXP_BITS(x,dest) ((dest) = ((((x) << HALF_EXP_SHIFT) & HALF_EXP_MASK) | ( (dest) & ( HALF_SIGN_MASK | HALF_MANT_MASK ))))
+#define SET_HALF_MANT_BITS(x,dest) ((dest) = ((((x) << HALF_MANT_SHIFT) & HALF_MANT_MASK) | ( (dest) & ( HALF_SIGN_MASK | HALF_EXP_MASK ))))
+
+//
+// FLOAT32 Helpers
+//
+
+#define SINGLE_SIGN_SHIFT (31)
+#define SINGLE_EXP_SHIFT (23)
+#define SINGLE_MANT_SHIFT (0)
+
+#define SINGLE_SIGN_MASK (0x80000000)
+#define SINGLE_EXP_MASK (0x7F800000)
+#define SINGLE_MANT_MASK (0x007FFFFF)
+
+#define SINGLE_POS_INFINITY (0x7F800000)
+#define SINGLE_NEG_INFINITY (0xFF800000)
+
+#define GET_SINGLE_SIGN_BIT(x) ((x) >> SINGLE_SIGN_SHIFT)
+#define GET_SINGLE_EXP_BITS(x) (((x) >> SINGLE_EXP_SHIFT) & 0xFF)
+#define GET_SINGLE_MANT_BITS(x) ((x) & SINGLE_MANT_MASK)
+
+#define SET_SINGLE_SIGN_BIT(x,dest) ((dest) = ((((x) << SINGLE_SIGN_SHIFT) & SINGLE_SIGN_MASK) | ( (dest) & ( SINGLE_EXP_MASK | SINGLE_MANT_MASK ))))
+#define SET_SINGLE_EXP_BITS(x,dest) ((dest) = ((((x) << SINGLE_EXP_SHIFT) & SINGLE_EXP_MASK) | ( (dest) & ( SINGLE_SIGN_MASK | SINGLE_MANT_MASK ))))
+#define SET_SINGLE_MANT_BITS(x,dest) ((dest) = ((((x) << SINGLE_MANT_SHIFT) & SINGLE_MANT_MASK) | ( (dest) & ( SINGLE_SIGN_MASK | SINGLE_EXP_MASK ))))
+
+//
+// (!) Note: the float16 (i.e. half) format is provided for storage
+// purposes only, and should not be used for computation.
+//
+// As a result, we do not provide any arithmetic operators.
+//
+
+class FLOAT16
+{
+ UINT16 m_uiFormat;
+
+public:
+
+ FLOAT16();
+ FLOAT16( CONST FLOAT16 & rhs );
+ FLOAT16( CONST FLOAT32 & rhs );
+ ~FLOAT16();
+
+ //
+ // Member operations
+ //
+
+ BOOL operator == ( CONST FLOAT16 & rhs );
+ BOOL operator != ( CONST FLOAT16 & rhs );
+
+ FLOAT16 & operator = ( CONST FLOAT16 & rhs );
+ FLOAT16 & operator = ( CONST FLOAT32 & rhs );
+
+ //
+ // Conversions -- note that we purposely avoid cast operators
+ //
+
+ static FLOAT32 ToFloat32( FLOAT16 rhs );
+ static FLOAT16 ToFloat16( FLOAT32 rhs );
+
+ //
+ // The fast variants handle only the most common normalized conversion case.
+ // If a conversion requires QNaN, SNaN, Inf, or denormalized handling, do not
+ // use these.
+ //
+
+ static FLOAT32 ToFloat32Fast( FLOAT16 rhs );
+ static FLOAT16 ToFloat16Fast( FLOAT32 rhs );
+};
+
+#endif // __VN_HALF_H__
diff --git a/src/libs/image-resampler/Base/vnImage.cpp b/src/libs/image-resampler/Base/vnImage.cpp
new file mode 100644
index 000000000..e8092d080
--- /dev/null
+++ b/src/libs/image-resampler/Base/vnImage.cpp
@@ -0,0 +1,227 @@
+
+#include "vnImage.h"
+
+CVImage::CVImage()
+{
+ m_uiImageFormat = VN_IMAGE_FORMAT_NONE;
+ m_uiWidthInPixels = 0;
+ m_uiHeightInPixels = 0;
+ m_uiBitsPerPixel = 0;
+ m_uiChannelCount = 0;
+ m_pbyDataBuffer = 0;
+ m_uiDataCapacity = 0;
+}
+
+CVImage::~CVImage()
+{
+ if ( VN_FAILED( Deallocate() ) )
+ {
+ vnPostError( VN_ERROR_EXECUTION_FAILURE );
+ }
+}
+
+UINT32 CVImage::RowPitch() CONST
+{
+ return ( m_uiWidthInPixels * m_uiBitsPerPixel ) >> 3;
+}
+
+UINT32 CVImage::SlicePitch() CONST
+{
+ return m_uiDataCapacity;
+}
+
+UINT32 CVImage::BlockOffset( UINT32 i, UINT32 j ) CONST
+{
+ return ( RowPitch() * j ) + ( ( i * m_uiBitsPerPixel ) >> 3 );
+}
+
+
+VN_STATUS CVImage::Allocate( UINT32 uiSize )
+{
+ if ( VN_PARAM_CHECK )
+ {
+ if ( 0 == uiSize )
+ {
+ return vnPostError( VN_ERROR_INVALIDARG );
+ }
+ }
+
+ if ( VN_FAILED( Deallocate() ) )
+ {
+ return vnPostError( VN_ERROR_EXECUTION_FAILURE );
+ }
+
+ m_pbyDataBuffer = new UINT8[ uiSize ];
+
+ if ( !m_pbyDataBuffer )
+ {
+ return vnPostError( VN_ERROR_OUTOFMEMORY );
+ }
+
+ m_uiDataCapacity = uiSize;
+
+ //
+ // Zero out our initial image memory (for good measure).
+ //
+
+ memset( m_pbyDataBuffer, 0, uiSize );
+
+ return VN_SUCCESS;
+}
+
+VN_STATUS CVImage::Deallocate()
+{
+ delete [] m_pbyDataBuffer;
+
+ m_pbyDataBuffer = 0;
+ m_uiDataCapacity = 0;
+
+ return VN_SUCCESS;
+}
+
+VN_STATUS CVImage::SetDimension( UINT32 uiNewWidth, UINT32 uiNewHeight )
+{
+ if ( VN_PARAM_CHECK )
+ {
+ if ( 0 == uiNewWidth || 0 == uiNewHeight )
+ {
+ return vnPostError( VN_ERROR_INVALIDARG );
+ }
+ }
+
+ //
+ // Check for an uninitialized image
+ //
+
+ if ( 0 == m_uiBitsPerPixel || VN_IMAGE_FORMAT_NONE == m_uiImageFormat )
+ {
+ //
+ // You must call SetFormat prior to calling this function, so that
+ // we know how to allocate the image.
+ //
+
+ return vnPostError( VN_ERROR_INVALID_RESOURCE );
+ }
+
+ if ( uiNewWidth == QueryWidth() && uiNewHeight == QueryHeight() )
+ {
+ return VN_SUCCESS;
+ }
+
+ //
+ // All images are required to use byte aligned pixel rates, so there is
+ // no need to align the allocation size.
+ //
+
+ if ( VN_FAILED( Allocate( ( uiNewWidth * uiNewHeight * m_uiBitsPerPixel ) >> 3 ) ) )
+ {
+ return vnPostError( VN_ERROR_OUTOFMEMORY );
+ }
+
+ m_uiWidthInPixels = uiNewWidth;
+ m_uiHeightInPixels = uiNewHeight;
+
+ return VN_SUCCESS;
+}
+
+VN_STATUS CVImage::SetFormat( VN_IMAGE_FORMAT format )
+{
+ if ( VN_PARAM_CHECK )
+ {
+ if ( 0 == VN_IMAGE_CHANNEL_COUNT( format ) )
+ {
+ return vnPostError( VN_ERROR_INVALIDARG );
+ }
+ }
+
+ UINT32 uiRateTotal = VN_IMAGE_PIXEL_RATE( format );
+ UINT8 uiChannelCount = VN_IMAGE_CHANNEL_COUNT( format );
+
+ if ( 0 != ( uiRateTotal % 8 ) )
+ {
+ //
+ // The format is invalid -- it does not contain a byte aligned pixel rate.
+ //
+
+ return vnPostError( VN_ERROR_INVALIDARG );
+ }
+
+ m_uiImageFormat = format;
+ m_uiBitsPerPixel = uiRateTotal;
+ m_uiChannelCount = uiChannelCount;
+
+ return VN_SUCCESS;
+}
+
+UINT32 CVImage::QueryWidth() CONST
+{
+ return m_uiWidthInPixels;
+}
+
+UINT32 CVImage::QueryHeight() CONST
+{
+ return m_uiHeightInPixels;
+}
+
+UINT8 * CVImage::QueryData() CONST
+{
+ return m_pbyDataBuffer;
+}
+
+UINT8 CVImage::QueryBitsPerPixel() CONST
+{
+ return m_uiBitsPerPixel;
+}
+
+VN_IMAGE_FORMAT CVImage::QueryFormat() CONST
+{
+ return m_uiImageFormat;
+}
+
+UINT8 CVImage::QueryChannelCount() CONST
+{
+ return m_uiChannelCount;
+}
+
+VN_STATUS vnCreateImage( VN_IMAGE_FORMAT format, UINT32 uiWidth, UINT32 uiHeight, CVImage * pOutImage )
+{
+ if ( VN_PARAM_CHECK )
+ {
+ if ( 0 == uiWidth || 0 == uiHeight )
+ {
+ return vnPostError( VN_ERROR_INVALIDARG );
+ }
+
+ if ( !pOutImage )
+ {
+ return vnPostError( VN_ERROR_INVALIDARG );
+ }
+ }
+
+ if ( VN_FAILED( (pOutImage)->SetFormat( format ) ) )
+ {
+ return vnPostError( VN_ERROR_EXECUTION_FAILURE );
+ }
+
+ if ( VN_FAILED( (pOutImage)->SetDimension( uiWidth, uiHeight ) ) )
+ {
+ return vnPostError( VN_ERROR_EXECUTION_FAILURE );
+ }
+
+ return VN_SUCCESS;
+}
+
+VN_STATUS vnDestroyImage( CVImage * pInImage )
+{
+ if ( !pInImage )
+ {
+ return VN_SUCCESS;
+ }
+
+ if ( VN_FAILED( pInImage->Deallocate() ) )
+ {
+ return vnPostError( VN_ERROR_EXECUTION_FAILURE );
+ }
+
+ return VN_SUCCESS;
+}
\ No newline at end of file
diff --git a/src/libs/image-resampler/Base/vnImage.h b/src/libs/image-resampler/Base/vnImage.h
new file mode 100644
index 000000000..22a2cb5ea
--- /dev/null
+++ b/src/libs/image-resampler/Base/vnImage.h
@@ -0,0 +1,161 @@
+
+//
+// Copyright (c) 2002-2009 Joe Bertolami. All Right Reserved.
+//
+// vnImage.h
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Additional Information:
+//
+// For more information, visit http://www.bertolami.com.
+//
+
+#ifndef __VN_IMAGE_H__
+#define __VN_IMAGE_H__
+
+#include "vnBase.h"
+#include "vnMath.h"
+#include "vnImageFormat.h"
+
+#define VN_IS_IMAGE_VALID(x) ( VN_IS_FORMAT_VALID((x).QueryFormat()) && \
+ (x).QueryBitsPerPixel() != 0 && \
+ (x).QueryWidth() != 0 && \
+ (x).QueryHeight() != 0 )
+
+class CVImage
+{
+ friend VN_STATUS vnCreateImage( VN_IMAGE_FORMAT format, UINT32 uiWidth, UINT32 uiHeight, CVImage * pOutImage );
+
+ friend VN_STATUS vnDestroyImage( CVImage * pInImage );
+
+private:
+
+ VN_IMAGE_FORMAT m_uiImageFormat;
+
+ //
+ // As a result of the diversity in supported image formats, we treat our
+ // data set as a collection of blocks, which may either contain one or more
+ // channels of color, or otherwise encoded information.
+ //
+ // When speaking logically about the image however, we operate in terms of pixels.
+ //
+
+ UINT32 m_uiWidthInPixels;
+ UINT32 m_uiHeightInPixels;
+ UINT32 m_uiBitsPerPixel;
+ UINT8 m_uiChannelCount;
+ UINT8 * m_pbyDataBuffer;
+ UINT32 m_uiDataCapacity;
+
+private:
+
+ //
+ // Allocation management
+ //
+
+ VN_STATUS Allocate( UINT32 uiSize );
+ VN_STATUS Deallocate();
+
+ //
+ // A CVImage is considered uninitialized if its m_uiRTTI field is set to zero
+ // (no format). Images must have an empty data buffer in this scenario.
+ //
+
+ VN_STATUS SetFormat( VN_IMAGE_FORMAT format );
+
+ //
+ // SetDimension will automatically manage the memory of the object. This is the
+ // primary interface that should be used for reserving memory for the image. Note
+ // that the image must contain a valid format prior to calling SetDimension.
+ //
+
+ VN_STATUS SetDimension( UINT32 uiNewWidth, UINT32 uiNewHeight );
+
+public:
+
+ CVImage();
+ virtual ~CVImage();
+
+ //
+ // Query interfaces
+ //
+
+ UINT32 QueryWidth() CONST; // the width of the image in pixels
+ UINT32 QueryHeight() CONST; // the height of the image in pixels
+ UINT8 * QueryData() CONST; // base pointer of the image data
+ UINT8 QueryBitsPerPixel() CONST; // channel-specific block size
+ UINT8 QueryChannelCount() CONST; // number of valid channels
+ VN_IMAGE_FORMAT QueryFormat() CONST;
+
+public:
+
+ //
+ // Row Pitch
+ //
+ // RowPitch is the byte delta between two adjacent rows of pixels in the image.
+ // This function takes alignment into consideration and may provide a value that
+ // is greater than the byte width of the visible image.
+ //
+
+ UINT32 RowPitch() CONST;
+
+ //
+ // Slice Pitch
+ //
+ // SlicePitch is the byte size of the entire image. This size may extend beyond the
+ // edge of the last row and column of the image, due to alignment and tiling
+ // requirements on certain platforms.
+ //
+
+ UINT32 SlicePitch() CONST;
+
+ //
+ // Block Offset
+ //
+ // Block offset returns the byte offset from the start of the image to pixel (i,j).
+ // Formats are required to use byte aligned pixel rates, so this function will always
+ // point to the start of a pixel block.
+ //
+
+ UINT32 BlockOffset( UINT32 i, UINT32 j ) CONST;
+};
+
+//
+// CVImage Creator
+//
+// This is the sole interface for creating new CVImage objects. We require the use of this function
+// to guarantee that image creation is high-level atomic.
+//
+
+VN_STATUS vnCreateImage( VN_IMAGE_FORMAT format, UINT32 uiWidth, UINT32 uiHeight, CVImage * pOutImage );
+
+//
+// CVImage Destructor
+//
+// CVImage objects will destroy themselves automatically, but we encourage the use of this discreet
+// 'destructor' to allow for future internal memory management.
+//
+
+VN_STATUS vnDestroyImage( CVImage * pInImage );
+
+#endif // __VN_IMAGE_H__
diff --git a/src/libs/image-resampler/Base/vnImageFormat.h b/src/libs/image-resampler/Base/vnImageFormat.h
new file mode 100644
index 000000000..ef7a93a61
--- /dev/null
+++ b/src/libs/image-resampler/Base/vnImageFormat.h
@@ -0,0 +1,262 @@
+
+//
+// Copyright (c) 2002-2009 Joe Bertolami. All Right Reserved.
+//
+// vnImageFormat.h
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Additional Information:
+//
+// For more information, visit http://www.bertolami.com.
+//
+
+#ifndef __VN_IMAGE_FORMAT_H__
+#define __VN_IMAGE_FORMAT_H__
+
+#include "vnBase.h"
+
+//
+// An image format is a 32 bit value that specifies a unique image identifier with the
+// following pattern:
+//
+// Bit(s) Title Description
+// ------ -------- -------------------------------------
+// 31: Endian, zero for little, one for big
+// 30: Precision, zero for fixed, one for float
+// 29: Space, zero for color, one for depth
+// 28: Signed, zero for unsigned, one for signed
+// 27-24: Reserved, reserved for future compression support
+// 23-18: Channel 0, specifies the bit count of the first channel
+// 17-12: Channel 1, specifies the bit count of the second channel
+// 11-6: Channel 2, specifies the bit count of the third channel
+// 5-0: Channel 3, specifies the bit count of the fourth channel
+//
+
+#define VN_IMAGE_MAX_CHANNEL_COUNT (4)
+#define VN_IMAGE_MAX_CHANNEL_SHIFT (6)
+
+#define VN_IMAGE_ENDIAN_MASK (0x01)
+#define VN_IMAGE_PRECISION_MASK (0x01)
+#define VN_IMAGE_SPACE_MASK (0x01)
+#define VN_IMAGE_SIGNED_MASK (0x01)
+#define VN_IMAGE_RESERVED_MASK (0x0F)
+#define VN_IMAGE_CHANNEL_MASK (0x3F)
+
+#define VN_IMAGE_ENDIAN_SHIFT (0x1F)
+#define VN_IMAGE_PRECISION_SHIFT (0x1E)
+#define VN_IMAGE_SPACE_SHIFT (0x1D)
+#define VN_IMAGE_SIGNED_SHIFT (0x1C)
+#define VN_IMAGE_RESERVED_SHIFT (0x18)
+#define VN_IMAGE_CHANNEL_0_SHIFT (0x12)
+#define VN_IMAGE_CHANNEL_1_SHIFT (0x0C)
+#define VN_IMAGE_CHANNEL_2_SHIFT (0x06)
+#define VN_IMAGE_CHANNEL_3_SHIFT (0x00)
+
+//
+// The following restrictions exist when defining new image formats. If these
+// restrictions are violated, the format may still be specified, but is not
+// considered valid. In these cases, individual filter support should be verified
+// and not assumed.
+//
+// Restrictions:
+//
+// 1. The pixel rate (i.e. the sum of the bit rates of all valid channels), should
+// be 8-aligned.
+//
+// 2. Channels may not mix float and fixed data types.
+//
+// 3. The current maximum supported bit rate per channel is 32, thus the maximum
+// pixel rate is 128.
+//
+// 4. Channels must be used contiguously. You may not skip a channel (e.g. use /
+// R and B channels without G). A format with a channel count of N must consume
+// its first N channels. If you require more advanced channel selection, opt for
+// a set of planar images.
+//
+// 5. All multi-channel formats must be interleaved. Planar multi-channel formats
+// are supported at a higher level using multiple single channel planar images.
+//
+
+enum VN_IMAGE_ENDIAN
+{
+ VN_IMAGE_ENDIAN_LITTLE = 0,
+ VN_IMAGE_ENDIAN_BIG = 1,
+ VN_IMAGE_ENDIAN_FORCE_BYTE = 0x7F
+};
+
+enum VN_IMAGE_PRECISION
+{
+ VN_IMAGE_PRECISION_FIXED = 0,
+ VN_IMAGE_PRECISION_FLOAT = 1,
+ VN_IMAGE_PRECISION_FORCE_BYTE = 0x7F
+};
+
+enum VN_IMAGE_SPACE
+{
+ VN_IMAGE_SPACE_COLOR = 0,
+ VN_IMAGE_SPACE_DEPTH = 1,
+ VN_IMAGE_SPACE_FORCE_BYTE = 0x7F
+};
+
+//
+// (!) Note: we support heterogeneous channel rates, with a maximum pixel rate of
+//           128 bits per pixel for both float and fixed formats. The following
+// examples demonstrate (but are not limited to) the types of formats
+// that are permitted with this format:
+//
+// R16G32B32F ----- float format with 16, 32, and 32 bpc for RGB respectively.
+//
+// R3G3B2 ----- fixed format with 3, 3, and 2 bpc for RGB respectively.
+//
+// R32F ----- float format, single channel, 32 bit
+//
+
+#define VN_IMAGE_CHANNEL_RATE( c, f ) ( ( (f) >> ( ( ( VN_IMAGE_MAX_CHANNEL_COUNT - 1 ) - (c) ) * \
+ VN_IMAGE_MAX_CHANNEL_SHIFT ) ) & VN_IMAGE_CHANNEL_MASK )
+
+#define VN_IS_FLOAT_FORMAT( x ) ( ( (x) >> VN_IMAGE_PRECISION_SHIFT ) & VN_IMAGE_PRECISION_MASK )
+
+#define VN_IS_SIGNED_FORMAT( x ) ( ( (x) >> VN_IMAGE_SIGNED_SHIFT ) & VN_IMAGE_SIGNED_MASK )
+
+#define VN_IMAGE_PIXEL_RATE( x ) ( ( ( (x) >> VN_IMAGE_CHANNEL_0_SHIFT ) & VN_IMAGE_CHANNEL_MASK ) + \
+ ( ( (x) >> VN_IMAGE_CHANNEL_1_SHIFT ) & VN_IMAGE_CHANNEL_MASK ) + \
+ ( ( (x) >> VN_IMAGE_CHANNEL_2_SHIFT ) & VN_IMAGE_CHANNEL_MASK ) + \
+ ( ( (x) >> VN_IMAGE_CHANNEL_3_SHIFT ) & VN_IMAGE_CHANNEL_MASK ) )
+
+#define VN_IMAGE_CHANNEL_COUNT( x ) ( !!( ( (x) >> VN_IMAGE_CHANNEL_0_SHIFT ) & VN_IMAGE_CHANNEL_MASK ) + \
+ !!( ( (x) >> VN_IMAGE_CHANNEL_1_SHIFT ) & VN_IMAGE_CHANNEL_MASK ) + \
+ !!( ( (x) >> VN_IMAGE_CHANNEL_2_SHIFT ) & VN_IMAGE_CHANNEL_MASK ) + \
+ !!( ( (x) >> VN_IMAGE_CHANNEL_3_SHIFT ) & VN_IMAGE_CHANNEL_MASK ) )
+
+//
+// For a format to be considered valid, it must have:
+//
+// 1. at least one non-zero sized channel
+//
+// 2. correct usage of sign and float: a float format must not also set the signed
+//    flag, because floating point values are inherently signed (there is no
+//    unsigned float variant)
+//
+
+//
+// (!) Fix: the previous form OR'd the (float && signed) test in with the
+//     channel checks, which made an *invalid* signed float format report as
+//     valid. Per the rules above, a format is valid when it has at least one
+//     non-empty channel AND is not simultaneously float and signed.
+//
+
+#define VN_IS_FORMAT_VALID( f )     ( ( VN_IMAGE_CHANNEL_RATE( 0, f ) || \
+                                        VN_IMAGE_CHANNEL_RATE( 1, f ) || \
+                                        VN_IMAGE_CHANNEL_RATE( 2, f ) || \
+                                        VN_IMAGE_CHANNEL_RATE( 3, f ) ) && \
+                                      !( VN_IS_FLOAT_FORMAT( f ) && VN_IS_SIGNED_FORMAT( f ) ) )
+
+
+//
+// (!) N.B. We distinguish between rgba and depth color spaces in order to better support
+// graphics hardware that manages these separately. This allows the engine to
+// distinguish between identical data layouts that require different usage.
+//
+
+#define VN_MAKE_IMAGE_FORMAT( endian, precision, space, sign, reserved, zero, one, two, three ) ( \
+ ( ( ( endian ) & VN_IMAGE_ENDIAN_MASK ) << VN_IMAGE_ENDIAN_SHIFT ) | \
+ ( ( ( precision ) & VN_IMAGE_PRECISION_MASK ) << VN_IMAGE_PRECISION_SHIFT ) | \
+ ( ( ( space ) & VN_IMAGE_SPACE_MASK ) << VN_IMAGE_SPACE_SHIFT ) | \
+ ( ( ( sign ) & VN_IMAGE_SIGNED_MASK ) << VN_IMAGE_SIGNED_SHIFT ) | \
+ ( ( ( reserved ) & VN_IMAGE_RESERVED_MASK ) << VN_IMAGE_RESERVED_SHIFT ) | \
+ ( ( ( zero ) & VN_IMAGE_CHANNEL_MASK ) << VN_IMAGE_CHANNEL_0_SHIFT ) | \
+ ( ( ( one ) & VN_IMAGE_CHANNEL_MASK ) << VN_IMAGE_CHANNEL_1_SHIFT ) | \
+ ( ( ( two ) & VN_IMAGE_CHANNEL_MASK ) << VN_IMAGE_CHANNEL_2_SHIFT ) | \
+ ( ( ( three ) & VN_IMAGE_CHANNEL_MASK ) << VN_IMAGE_CHANNEL_3_SHIFT ) )
+
+//
+// The image format list that is enumerated below was designed to support a broad set of
+// useful formats, and to support the easy addition of new formats in the future. This list
+// is reserved for interleaved RGB formats only, and should not include planar formats or
+// formats in alternate color spaces (e.g. YUV).
+//
+
+enum VN_IMAGE_FORMAT
+{
+ VN_IMAGE_FORMAT_NONE = VN_MAKE_IMAGE_FORMAT( 0, 0, 0, 0, 0, 0, 0, 0, 0 ),
+
+ //
+ // Unsigned integer formats
+ //
+
+ VN_IMAGE_FORMAT_R8 = VN_MAKE_IMAGE_FORMAT( 0, 0, 0, 0, 0, 8, 0, 0, 0 ),
+ VN_IMAGE_FORMAT_R16 = VN_MAKE_IMAGE_FORMAT( 0, 0, 0, 0, 0, 16, 0, 0, 0 ),
+ VN_IMAGE_FORMAT_D16 = VN_MAKE_IMAGE_FORMAT( 0, 0, 1, 0, 0, 16, 0, 0, 0 ),
+ VN_IMAGE_FORMAT_R24 = VN_MAKE_IMAGE_FORMAT( 0, 0, 0, 0, 0, 24, 0, 0, 0 ),
+ VN_IMAGE_FORMAT_D32 = VN_MAKE_IMAGE_FORMAT( 0, 0, 1, 0, 0, 32, 0, 0, 0 ),
+ VN_IMAGE_FORMAT_R32 = VN_MAKE_IMAGE_FORMAT( 0, 0, 0, 0, 0, 32, 0, 0, 0 ),
+
+ VN_IMAGE_FORMAT_R3G3B2 = VN_MAKE_IMAGE_FORMAT( 0, 0, 0, 0, 0, 3, 3, 2, 0 ),
+ VN_IMAGE_FORMAT_R5G6B5 = VN_MAKE_IMAGE_FORMAT( 0, 0, 0, 0, 0, 5, 6, 5, 0 ),
+ VN_IMAGE_FORMAT_R5G5B5A1 = VN_MAKE_IMAGE_FORMAT( 0, 0, 0, 0, 0, 5, 5, 5, 1 ),
+ VN_IMAGE_FORMAT_R4G4B4A4 = VN_MAKE_IMAGE_FORMAT( 0, 0, 0, 0, 0, 4, 4, 4, 4 ),
+
+ VN_IMAGE_FORMAT_R8G8B8 = VN_MAKE_IMAGE_FORMAT( 0, 0, 0, 0, 0, 8, 8, 8, 0 ),
+ VN_IMAGE_FORMAT_R16G16B16 = VN_MAKE_IMAGE_FORMAT( 0, 0, 0, 0, 0, 16, 16, 16, 0 ),
+ VN_IMAGE_FORMAT_R32G32B32 = VN_MAKE_IMAGE_FORMAT( 0, 0, 0, 0, 0, 32, 32, 32, 0 ),
+
+ VN_IMAGE_FORMAT_R8G8B8A8 = VN_MAKE_IMAGE_FORMAT( 0, 0, 0, 0, 0, 8, 8, 8, 8 ),
+ VN_IMAGE_FORMAT_R10G10B10A2 = VN_MAKE_IMAGE_FORMAT( 0, 0, 0, 0, 0, 10, 10, 10, 2 ),
+ VN_IMAGE_FORMAT_R16G16B16A16 = VN_MAKE_IMAGE_FORMAT( 0, 0, 0, 0, 0, 16, 16, 16, 16 ),
+ VN_IMAGE_FORMAT_R32G32B32A32 = VN_MAKE_IMAGE_FORMAT( 0, 0, 0, 0, 0, 32, 32, 32, 32 ),
+
+ //
+ // Signed integer formats
+ //
+
+ VN_IMAGE_FORMAT_R8S = VN_MAKE_IMAGE_FORMAT( 0, 0, 0, 1, 0, 8, 0, 0, 0 ),
+ VN_IMAGE_FORMAT_R16S = VN_MAKE_IMAGE_FORMAT( 0, 0, 0, 1, 0, 16, 0, 0, 0 ),
+ VN_IMAGE_FORMAT_R32S = VN_MAKE_IMAGE_FORMAT( 0, 0, 0, 1, 0, 32, 0, 0, 0 ),
+
+ VN_IMAGE_FORMAT_R8G8B8S = VN_MAKE_IMAGE_FORMAT( 0, 0, 0, 1, 0, 8, 8, 8, 0 ),
+ VN_IMAGE_FORMAT_R16G16B16S = VN_MAKE_IMAGE_FORMAT( 0, 0, 0, 1, 0, 16, 16, 16, 0 ),
+ VN_IMAGE_FORMAT_R32G32B32S = VN_MAKE_IMAGE_FORMAT( 0, 0, 0, 1, 0, 32, 32, 32, 0 ),
+
+ //
+ // Float formats
+ //
+
+ VN_IMAGE_FORMAT_R16F = VN_MAKE_IMAGE_FORMAT( 0, 1, 0, 0, 0, 16, 0, 0, 0 ),
+ VN_IMAGE_FORMAT_R32F = VN_MAKE_IMAGE_FORMAT( 0, 1, 0, 0, 0, 32, 0, 0, 0 ),
+
+ VN_IMAGE_FORMAT_R32G32F = VN_MAKE_IMAGE_FORMAT( 0, 1, 0, 0, 0, 32, 32, 0, 0 ),
+
+ VN_IMAGE_FORMAT_R16G16B16F = VN_MAKE_IMAGE_FORMAT( 0, 1, 0, 0, 0, 16, 16, 16, 0 ),
+ VN_IMAGE_FORMAT_R32G32B32F = VN_MAKE_IMAGE_FORMAT( 0, 1, 0, 0, 0, 32, 32, 32, 0 ),
+
+ VN_IMAGE_FORMAT_R16G16B16A16F = VN_MAKE_IMAGE_FORMAT( 0, 1, 0, 0, 0, 16, 16, 16, 16 ),
+ VN_IMAGE_FORMAT_R32G32B32A32F = VN_MAKE_IMAGE_FORMAT( 0, 1, 0, 0, 0, 32, 32, 32, 32 ),
+
+ // ...
+
+ VN_IMAGE_FORMAT_FORCE_DWORD = 0x7FFFFFFF
+};
+
+//
+// Format aliases
+//
+
+#define VN_IMAGE_FORMAT_L8 VN_IMAGE_FORMAT_R8
+#define VN_IMAGE_FORMAT_U8 VN_IMAGE_FORMAT_R8
+#define VN_IMAGE_FORMAT_V8 VN_IMAGE_FORMAT_R8
+#define VN_IMAGE_FORMAT_L16 VN_IMAGE_FORMAT_R16
+#define VN_IMAGE_FORMAT_U16 VN_IMAGE_FORMAT_R16
+#define VN_IMAGE_FORMAT_V16 VN_IMAGE_FORMAT_R16
+
+#endif // __VN_IMAGE_FORMAT_H__
\ No newline at end of file
diff --git a/src/libs/image-resampler/Base/vnMath.h b/src/libs/image-resampler/Base/vnMath.h
new file mode 100644
index 000000000..d3d661fa0
--- /dev/null
+++ b/src/libs/image-resampler/Base/vnMath.h
@@ -0,0 +1,160 @@
+
+// Copyright (c) 2002-2009 Joe Bertolami. All Right Reserved.
+//
+// vnMath.h
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Additional Information:
+//
+// For more information, visit http://www.bertolami.com.
+
+#ifndef __VN_MATH_H__
+#define __VN_MATH_H__
+
+#include "vnBase.h"
+#include "vnHalf.h"
+
+#define VN_MAX_INT64 (0x7FFFFFFFFFFFFFFF)
+#define VN_MAX_INT32 (0x7FFFFFFF)
+#define VN_MAX_INT16 (0x7FFF)
+#define VN_MAX_INT8 (0x7F)
+
+#define VN_MAX_UINT64 (0xFFFFFFFFFFFFFFFF)
+#define VN_MAX_UINT32 (0xFFFFFFFF)
+#define VN_MAX_UINT16 (0xFFFF)
+#define VN_MAX_UINT8 (0xFF)
+
+#define VN_MIN_INT64 (-VN_MAX_INT64 - 1)
+#define VN_MIN_INT32 (-VN_MAX_INT32 - 1)
+#define VN_MIN_INT16 (-VN_MAX_INT16 - 1)
+#define VN_MIN_INT8 (-VN_MAX_INT8 - 1)
+
+#define VN_PI (3.14159262f)
+
+#define VN_MIN2( a, b ) ((a) < (b) ? (a) : (b))
+#define VN_MAX2( a, b ) ((a) > (b) ? (a) : (b))
+#define VN_MIN3( a, b, c ) ((c) < (a) ? ((c) < (b) ? (c) : (b)) : (a) < (b) ? (a) : (b))
+#define VN_MAX3( a, b, c ) ((c) > (a) ? ((c) > (b) ? (c) : (b)) : (a) > (b) ? (a) : (b))
+
+inline FLOAT32 vnClipRange( FLOAT32 fInput, FLOAT32 fLow, FLOAT32 fHigh )
+{
+    // Clamp fInput to the inclusive range [fLow, fHigh]; the low bound is
+    // tested first, matching the companion overloads below.
+    if ( fInput < fLow )  return fLow;
+    if ( fInput > fHigh ) return fHigh;
+    return fInput;
+}
+
+inline INT32 vnClipRange( INT32 iInput, INT32 iLow, INT32 iHigh )
+{
+    // Integer clamp to [iLow, iHigh]; low bound checked first.
+    if ( iInput < iLow )  return iLow;
+    if ( iInput > iHigh ) return iHigh;
+    return iInput;
+}
+
+inline FLOAT64 vnClipRange64( FLOAT64 fInput, FLOAT64 fLow, FLOAT64 fHigh )
+{
+    // Double precision clamp to [fLow, fHigh]; low bound checked first.
+    if ( fInput < fLow )  return fLow;
+    if ( fInput > fHigh ) return fHigh;
+    return fInput;
+}
+
+inline INT64 vnClipRange64( INT64 iInput, INT64 iLow, INT64 iHigh )
+{
+    // 64-bit integer clamp to [iLow, iHigh]; low bound checked first.
+    if ( iInput < iLow )  return iLow;
+    if ( iInput > iHigh ) return iHigh;
+    return iInput;
+}
+
+inline FLOAT32 vnSaturate( FLOAT32 fInput )
+{
+    // Clamp to the normalized [0, 1] range (low bound tested first, as in
+    // vnClipRange).
+    return ( fInput < 0.0f ) ? 0.0f : ( ( fInput > 1.0f ) ? 1.0f : fInput );
+}
+
+inline INT32 vnSaturate( INT32 iInput )
+{
+    // Clamp to the 8-bit channel range [0, 255].
+    return ( iInput < 0 ) ? 0 : ( ( iInput > 255 ) ? 255 : iInput );
+}
+
+inline BOOL vnIsPow2( UINT32 uiValue )
+{
+    //
+    // Returns TRUE when uiValue is an exact power of two.
+    //
+    // (!) Fix: the previous test reported zero as a power of two, because
+    //     0 & (0 - 1) == 0. Zero is not a power of two, so reject it first.
+    //     NOTE(review): vnAlign2( 0 ) consequently now yields 1 instead of 0
+    //     -- confirm no caller depended on the old zero-in/zero-out behavior.
+    //
+
+    return ( 0 != uiValue ) && ( 0 == ( uiValue & ( uiValue - 1 ) ) );
+}
+
+inline FLOAT32 vnGreaterMultiple( FLOAT32 fValue, FLOAT32 fMultiple )
+{
+    // Round fValue up to the next multiple of fMultiple; exact multiples
+    // are returned unchanged.
+    FLOAT32 fRemainder = fmod( fValue, fMultiple );
+
+    if ( 0.0f == fRemainder )
+    {
+        return fValue;
+    }
+
+    return fValue + ( fMultiple - fRemainder );
+}
+
+inline UINT32 vnGreaterMultiple( UINT32 uiValue, UINT32 uiMultiple )
+{
+    //
+    // Round uiValue up to the next multiple of uiMultiple.
+    //
+    // (!) Fix: a zero multiple previously triggered an integer division by
+    //     zero (undefined behavior); treat it as "no alignment requested"
+    //     and return the value unchanged.
+    //
+
+    if ( 0 == uiMultiple )
+    {
+        return uiValue;
+    }
+
+    UINT32 mod = uiValue % uiMultiple;
+
+    if ( 0 != mod )
+    {
+        uiValue += ( uiMultiple - mod );
+    }
+
+    return uiValue;
+}
+
+inline UINT32 vnAlign( UINT32 uiValue, UINT32 uiAlignment )
+{
+    // Convenience alias: aligns uiValue UP to the next multiple of
+    // uiAlignment (see vnGreaterMultiple).
+    return vnGreaterMultiple( uiValue, uiAlignment );
+}
+
+inline UINT32 vnAlign16( UINT32 uiValue )
+{
+    // Clear the low four bits, snapping uiValue DOWN to a multiple of 16.
+    // (Equivalent to the original two's-complement form
+    // uiValue + ~(uiValue & 0xF) + 1, i.e. uiValue - (uiValue & 0xF).)
+    //
+    // NOTE(review): this truncates downward while vnAlign/vnGreaterMultiple
+    // round upward -- confirm callers expect the floor behavior.
+    return uiValue & ~( (UINT32) 0xF );
+}
+
+inline UINT32 vnAlign8( UINT32 uiValue )
+{
+    // Clear the low three bits, snapping uiValue DOWN to a multiple of 8.
+    // (Equivalent to the original uiValue + ~(uiValue & 0x7) + 1 form.)
+    //
+    // NOTE(review): truncates downward, unlike vnAlign -- confirm intent.
+    return uiValue & ~( (UINT32) 0x7 );
+}
+
+inline UINT32 vnAlign2( UINT32 uiValue )
+{
+    // Round uiValue up to the next power of two.
+
+    // Already a power of two (or zero): nothing to do.
+    if ( vnIsPow2( uiValue ) )
+    {
+        return uiValue;
+    }
+
+    // Count how many bit positions the value occupies; the next power of
+    // two is then 1 << bit-count.
+    INT32 iBitCount = 0;
+
+    for ( UINT32 uiScan = uiValue; 0 != uiScan; uiScan >>= 1 )
+    {
+        iBitCount++;
+    }
+
+    // NOTE(review): for non-power-of-two inputs above 2^31 this shifts by
+    // 32, which is undefined behavior -- confirm inputs are bounded.
+    return 1 << iBitCount;
+}
+
+#endif // __VN_MATH_H__
\ No newline at end of file
diff --git a/src/libs/image-resampler/Kernels/vnImageAverage.cpp b/src/libs/image-resampler/Kernels/vnImageAverage.cpp
new file mode 100644
index 000000000..3673e50b9
--- /dev/null
+++ b/src/libs/image-resampler/Kernels/vnImageAverage.cpp
@@ -0,0 +1,286 @@
+
+#include "vnImageAverage.h"
+
+#include "../Utilities/vnImageBlock.h"
+
+//
+// Performs a combined (non-separated) 2D box-average sample of pSrcImage at
+// (fX, fY) over the given radius, writing the converted result to pRawOutput
+// in the source image's format.
+//
+// (!) Fix: if the kernel footprint fell entirely outside the image bounds,
+//     fSampleCount stayed zero and the normalization below divided by zero,
+//     producing Inf/NaN garbage in the output. We now fail explicitly.
+//
+
+VN_STATUS vnAverageKernel( CONST CVImage & pSrcImage, FLOAT32 fX, FLOAT32 fY, FLOAT32 fRadius, UINT8 * pRawOutput )
+{
+    if ( VN_PARAM_CHECK )
+    {
+        if ( 0 == fRadius || !pRawOutput || !VN_IS_IMAGE_VALID(pSrcImage) )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    FLOAT32 fSampleCount = 0;
+    VN_PIXEL_BLOCK gTotalBlock = {0};
+    VN_PIXEL_BLOCK gTempBlock = {0};
+    INT32 iRadius = fRadius + 1.0f;      // truncating conversion, as before
+
+    //
+    // Scan the kernel space adding up the pixel values
+    //
+
+    for ( INT32 j = -iRadius + 1; j <= iRadius; j++ )
+    for ( INT32 i = -iRadius + 1; i <= iRadius; i++ )
+    {
+        INT32 iX = (INT32) fX + i;
+        INT32 iY = (INT32) fY + j;
+
+        // Skip taps that fall outside the image (edge clamp by omission).
+        if ( iX < 0 || iY < 0 ||
+             iX > pSrcImage.QueryWidth() - 1 ||
+             iY > pSrcImage.QueryHeight() - 1 )
+        {
+            continue;
+        }
+
+        UINT8 * pSrcPixel = pSrcImage.QueryData() + pSrcImage.BlockOffset( iX, iY );
+
+        vnConvertToBlock( pSrcPixel, pSrcImage.QueryFormat(), &gTempBlock );
+
+        if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+        {
+            gTotalBlock.fChannelData[0] += gTempBlock.fChannelData[0];
+            gTotalBlock.fChannelData[1] += gTempBlock.fChannelData[1];
+            gTotalBlock.fChannelData[2] += gTempBlock.fChannelData[2];
+            gTotalBlock.fChannelData[3] += gTempBlock.fChannelData[3];
+        }
+        else
+        {
+            gTotalBlock.iChannelData[0] += gTempBlock.iChannelData[0];
+            gTotalBlock.iChannelData[1] += gTempBlock.iChannelData[1];
+            gTotalBlock.iChannelData[2] += gTempBlock.iChannelData[2];
+            gTotalBlock.iChannelData[3] += gTempBlock.iChannelData[3];
+        }
+
+        fSampleCount++;
+    }
+
+    //
+    // (!) No valid samples were gathered -- the requested coordinate lies
+    //     entirely outside the image. Bail out rather than divide by zero.
+    //
+
+    if ( 0 == fSampleCount )
+    {
+        return vnPostError( VN_ERROR_INVALIDARG );
+    }
+
+    //
+    // Normalize our simple sum back to the valid pixel range
+    //
+
+    FLOAT32 fScaleFactor = 1.0f / fSampleCount;
+
+    gTotalBlock.uiPrecision = gTempBlock.uiPrecision;
+    gTotalBlock.uiChannelCount = gTempBlock.uiChannelCount;
+
+    if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+    {
+        gTotalBlock.fChannelData[0] = fScaleFactor * gTotalBlock.fChannelData[0];
+        gTotalBlock.fChannelData[1] = fScaleFactor * gTotalBlock.fChannelData[1];
+        gTotalBlock.fChannelData[2] = fScaleFactor * gTotalBlock.fChannelData[2];
+        gTotalBlock.fChannelData[3] = fScaleFactor * gTotalBlock.fChannelData[3];
+    }
+    else
+    {
+        gTotalBlock.iChannelData[0] = fScaleFactor * gTotalBlock.iChannelData[0];
+        gTotalBlock.iChannelData[1] = fScaleFactor * gTotalBlock.iChannelData[1];
+        gTotalBlock.iChannelData[2] = fScaleFactor * gTotalBlock.iChannelData[2];
+        gTotalBlock.iChannelData[3] = fScaleFactor * gTotalBlock.iChannelData[3];
+    }
+
+    //
+    // Write our averaged result to our output
+    //
+
+    vnConvertFromBlock( gTotalBlock, pSrcImage.QueryFormat(), pRawOutput );
+
+    return VN_SUCCESS;
+}
+
+//
+// Vertical-only (single column) average sample of pSrcImage at (fX, fY) --
+// one half of the separable average filter.
+//
+// (!) Fix: as with the 2D kernel, a sample position wholly outside the image
+//     left fSampleCount at zero and the normalization divided by zero.
+//
+
+VN_STATUS vnSampleAverageVertical( CONST CVImage & pSrcImage, FLOAT32 fX, FLOAT32 fY, FLOAT32 fRadius, UINT8 * pRawOutput )
+{
+    if ( VN_PARAM_CHECK )
+    {
+        if ( 0 == fRadius || !pRawOutput || !VN_IS_IMAGE_VALID(pSrcImage) )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    FLOAT32 fSampleCount = 0;
+    VN_PIXEL_BLOCK gTotalBlock = {0};
+    VN_PIXEL_BLOCK gTempBlock = {0};
+    INT32 iRadius = fRadius + 1.0f;      // truncating conversion, as before
+
+    //
+    // Scan the kernel space adding up the pixel values
+    //
+
+    for ( INT32 j = -iRadius + 1; j <= iRadius; j++ )
+    {
+        INT32 iX = (INT32) fX;
+        INT32 iY = (INT32) fY + j;
+
+        // Skip taps that fall outside the image.
+        if ( iX < 0 || iY < 0 ||
+             iX > pSrcImage.QueryWidth() - 1 ||
+             iY > pSrcImage.QueryHeight() - 1 )
+        {
+            continue;
+        }
+
+        UINT8 * pSrcPixel = pSrcImage.QueryData() + pSrcImage.BlockOffset( iX, iY );
+
+        vnConvertToBlock( pSrcPixel, pSrcImage.QueryFormat(), &gTempBlock );
+
+        if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+        {
+            gTotalBlock.fChannelData[0] += gTempBlock.fChannelData[0];
+            gTotalBlock.fChannelData[1] += gTempBlock.fChannelData[1];
+            gTotalBlock.fChannelData[2] += gTempBlock.fChannelData[2];
+            gTotalBlock.fChannelData[3] += gTempBlock.fChannelData[3];
+        }
+        else
+        {
+            gTotalBlock.iChannelData[0] += gTempBlock.iChannelData[0];
+            gTotalBlock.iChannelData[1] += gTempBlock.iChannelData[1];
+            gTotalBlock.iChannelData[2] += gTempBlock.iChannelData[2];
+            gTotalBlock.iChannelData[3] += gTempBlock.iChannelData[3];
+        }
+
+        fSampleCount++;
+    }
+
+    //
+    // (!) No valid samples -- avoid the divide by zero below.
+    //
+
+    if ( 0 == fSampleCount )
+    {
+        return vnPostError( VN_ERROR_INVALIDARG );
+    }
+
+    //
+    // Normalize our sum back to the valid pixel range
+    //
+
+    FLOAT32 fScaleFactor = 1.0f / fSampleCount;
+
+    gTotalBlock.uiPrecision = gTempBlock.uiPrecision;
+    gTotalBlock.uiChannelCount = gTempBlock.uiChannelCount;
+
+    if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+    {
+        gTotalBlock.fChannelData[0] = fScaleFactor * gTotalBlock.fChannelData[0];
+        gTotalBlock.fChannelData[1] = fScaleFactor * gTotalBlock.fChannelData[1];
+        gTotalBlock.fChannelData[2] = fScaleFactor * gTotalBlock.fChannelData[2];
+        gTotalBlock.fChannelData[3] = fScaleFactor * gTotalBlock.fChannelData[3];
+    }
+    else
+    {
+        gTotalBlock.iChannelData[0] = fScaleFactor * gTotalBlock.iChannelData[0];
+        gTotalBlock.iChannelData[1] = fScaleFactor * gTotalBlock.iChannelData[1];
+        gTotalBlock.iChannelData[2] = fScaleFactor * gTotalBlock.iChannelData[2];
+        gTotalBlock.iChannelData[3] = fScaleFactor * gTotalBlock.iChannelData[3];
+    }
+
+    //
+    // Write our averaged result to our output
+    //
+
+    vnConvertFromBlock( gTotalBlock, pSrcImage.QueryFormat(), pRawOutput );
+
+    return VN_SUCCESS;
+}
+
+//
+// Horizontal-only (single row) average sample of pSrcImage at (fX, fY) --
+// one half of the separable average filter.
+//
+// (!) Fix: as with the 2D kernel, a sample position wholly outside the image
+//     left fSampleCount at zero and the normalization divided by zero.
+//
+
+VN_STATUS vnSampleAverageHorizontal( CONST CVImage & pSrcImage, FLOAT32 fX, FLOAT32 fY, FLOAT32 fRadius, UINT8 * pRawOutput )
+{
+    if ( VN_PARAM_CHECK )
+    {
+        if ( 0 == fRadius || !pRawOutput || !VN_IS_IMAGE_VALID(pSrcImage) )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    FLOAT32 fSampleCount = 0;
+    VN_PIXEL_BLOCK gTotalBlock = {0};
+    VN_PIXEL_BLOCK gTempBlock = {0};
+    INT32 iRadius = fRadius + 1.0f;      // truncating conversion, as before
+
+    //
+    // Scan the kernel space adding up the pixel values
+    //
+
+    for ( INT32 i = -iRadius + 1; i <= iRadius; i++ )
+    {
+        INT32 iX = (INT32) fX + i;
+        INT32 iY = (INT32) fY;
+
+        // Skip taps that fall outside the image.
+        if ( iX < 0 || iY < 0 ||
+             iX > pSrcImage.QueryWidth() - 1 ||
+             iY > pSrcImage.QueryHeight() - 1 )
+        {
+            continue;
+        }
+
+        UINT8 * pSrcPixel = pSrcImage.QueryData() + pSrcImage.BlockOffset( iX, iY );
+
+        vnConvertToBlock( pSrcPixel, pSrcImage.QueryFormat(), &gTempBlock );
+
+        if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+        {
+            gTotalBlock.fChannelData[0] += gTempBlock.fChannelData[0];
+            gTotalBlock.fChannelData[1] += gTempBlock.fChannelData[1];
+            gTotalBlock.fChannelData[2] += gTempBlock.fChannelData[2];
+            gTotalBlock.fChannelData[3] += gTempBlock.fChannelData[3];
+        }
+        else
+        {
+            gTotalBlock.iChannelData[0] += gTempBlock.iChannelData[0];
+            gTotalBlock.iChannelData[1] += gTempBlock.iChannelData[1];
+            gTotalBlock.iChannelData[2] += gTempBlock.iChannelData[2];
+            gTotalBlock.iChannelData[3] += gTempBlock.iChannelData[3];
+        }
+
+        fSampleCount++;
+    }
+
+    //
+    // (!) No valid samples -- avoid the divide by zero below.
+    //
+
+    if ( 0 == fSampleCount )
+    {
+        return vnPostError( VN_ERROR_INVALIDARG );
+    }
+
+    //
+    // Normalize our sum back to the valid pixel range
+    //
+
+    FLOAT32 fScaleFactor = 1.0f / fSampleCount;
+
+    gTotalBlock.uiPrecision = gTempBlock.uiPrecision;
+    gTotalBlock.uiChannelCount = gTempBlock.uiChannelCount;
+
+    if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+    {
+        gTotalBlock.fChannelData[0] = fScaleFactor * gTotalBlock.fChannelData[0];
+        gTotalBlock.fChannelData[1] = fScaleFactor * gTotalBlock.fChannelData[1];
+        gTotalBlock.fChannelData[2] = fScaleFactor * gTotalBlock.fChannelData[2];
+        gTotalBlock.fChannelData[3] = fScaleFactor * gTotalBlock.fChannelData[3];
+    }
+    else
+    {
+        gTotalBlock.iChannelData[0] = fScaleFactor * gTotalBlock.iChannelData[0];
+        gTotalBlock.iChannelData[1] = fScaleFactor * gTotalBlock.iChannelData[1];
+        gTotalBlock.iChannelData[2] = fScaleFactor * gTotalBlock.iChannelData[2];
+        gTotalBlock.iChannelData[3] = fScaleFactor * gTotalBlock.iChannelData[3];
+    }
+
+    //
+    // Write our averaged result to our output
+    //
+
+    vnConvertFromBlock( gTotalBlock, pSrcImage.QueryFormat(), pRawOutput );
+
+    return VN_SUCCESS;
+}
+
+//
+// Separable entry point for the average filter: forwards to the horizontal
+// (bDirection == FALSE) or vertical (bDirection == TRUE) single-axis sampler.
+// Any other BOOL value is rejected, exactly as the original switch did.
+//
+
+VN_STATUS vnAverageKernel( CONST CVImage & pSrcImage, FLOAT32 fX, FLOAT32 fY, BOOL bDirection, FLOAT32 fRadius, UINT8 * pRawOutput )
+{
+    if ( VN_PARAM_CHECK )
+    {
+        if ( 0 == fRadius || !pRawOutput || !VN_IS_IMAGE_VALID(pSrcImage) )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    if ( FALSE == bDirection )
+    {
+        return vnSampleAverageHorizontal( pSrcImage, fX, fY, fRadius, pRawOutput );
+    }
+
+    if ( TRUE == bDirection )
+    {
+        return vnSampleAverageVertical( pSrcImage, fX, fY, fRadius, pRawOutput );
+    }
+
+    return VN_ERROR_NOTIMPL;
+}
\ No newline at end of file
diff --git a/src/libs/image-resampler/Kernels/vnImageAverage.h b/src/libs/image-resampler/Kernels/vnImageAverage.h
new file mode 100644
index 000000000..afa3da830
--- /dev/null
+++ b/src/libs/image-resampler/Kernels/vnImageAverage.h
@@ -0,0 +1,96 @@
+
+//
+// Copyright (c) 2002-2009 Joe Bertolami. All Right Reserved.
+//
+// vnImageAverage.h
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Additional Information:
+//
+// For more information, visit http://www.bertolami.com.
+//
+
+#ifndef __VN_IMAGE_AVERAGE_H__
+#define __VN_IMAGE_AVERAGE_H__
+
+#include "../Base/vnBase.h"
+#include "../Base/vnMath.h"
+#include "../Base/vnImage.h"
+
+//
+// Average Kernel
+//
+// AverageKernel performs a simple combined 2D average sampling of a source image.
+// Average filters are separable, but this function performs a non-separated kernel
+// that facilitates non-uniform sampling filters.
+//
+// Parameters:
+//
+// pSrcImage: The read-only source image to filter.
+//
+// fX, fY: Specifies the coordinates to sample in the source image.
+//
+// fRadius: The radius to sample with the average kernel.
+//
+// pRawOutput: a pointer to a pixel buffer. Upon return, this buffer will contain the
+// sampled result of the operation.
+//
+// Supported Image Formats:
+//
+// All image formats are supported.
+//
+
+VN_STATUS vnAverageKernel( CONST CVImage & pSrcImage, FLOAT32 fX, FLOAT32 fY, FLOAT32 fRadius, UINT8 * pRawOutput );
+
+//
+// Average Kernel
+//
+// AverageKernel performs a simple horizontal or vertical average sampling of a
+// source image.
+//
+// (!) Note: average filters are separable. This interface may be used to improve
+// operator performance by sampling the horizontal and vertical directions
+// separately.
+//
+// Parameters:
+//
+// pSrcImage: The read-only source image to filter.
+//
+// fX, fY: Specifies the coordinates to sample in the source image.
+//
+// bDirection: Specifies the direction to sample. FALSE indicates horizontal sampling,
+// while TRUE indicates vertical.
+//
+// fRadius: The radius to sample with the average kernel.
+//
+// pRawOutput: a pointer to a pixel buffer. Upon return, this buffer will contain the
+// sampled result of the operation.
+//
+// Supported Image Formats:
+//
+// All image formats are supported.
+//
+
+VN_STATUS vnAverageKernel( CONST CVImage & pSrcImage, FLOAT32 fX, FLOAT32 fY, BOOL bDirection, FLOAT32 fRadius, UINT8 * pRawOutput );
+
+#endif // __VN_IMAGE_AVERAGE_H__
\ No newline at end of file
diff --git a/src/libs/image-resampler/Kernels/vnImageBicubic.cpp b/src/libs/image-resampler/Kernels/vnImageBicubic.cpp
new file mode 100644
index 000000000..d8c14328c
--- /dev/null
+++ b/src/libs/image-resampler/Kernels/vnImageBicubic.cpp
@@ -0,0 +1,296 @@
+
+#include "vnImageBicubic.h"
+
+#include "../Utilities/vnImageBlock.h"
+
+// Performs a combined (non-separated) 2D bicubic sample of pSrcImage at the
+// coordinate (fX, fY), weighting each tap of a 4x4 neighborhood with the
+// Mitchell-Netravali cubic parameterized by (fCoeffB, fCoeffC). The
+// normalized result is written to pRawOutput in the source pixel format.
+VN_STATUS vnBicubicKernel( CONST CVImage & pSrcImage, FLOAT32 fCoeffB, FLOAT32 fCoeffC, FLOAT32 fX, FLOAT32 fY, UINT8 * pRawOutput )
+{
+    if ( VN_PARAM_CHECK )
+    {
+        if ( !pRawOutput || !VN_IS_IMAGE_VALID(pSrcImage) )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    FLOAT32 fSampleCount = 0;
+    VN_PIXEL_BLOCK gTotalBlock = {0};
+    VN_PIXEL_BLOCK gTempBlock = {0};
+
+    //
+    // Scan the kernel space adding up the bicubic weights and pixel values
+    //
+
+    // Visit the 4x4 tap neighborhood at offsets [-1, 2] around the truncated
+    // sample point; all of these taps lie within the filter's radius of 2.
+    for ( INT32 j = -2 + 1; j <= 2; j++ )
+    for ( INT32 i = -2 + 1; i <= 2; i++ )
+    {
+        INT32 iX = (INT32) fX + i;
+        INT32 iY = (INT32) fY + j;
+
+        // Taps outside the image are skipped; border pixels use fewer taps.
+        if ( iX < 0 || iY < 0 ||
+             iX > pSrcImage.QueryWidth() - 1 ||
+             iY > pSrcImage.QueryHeight() - 1 )
+        {
+            continue;
+        }
+
+        UINT8 * pSrcPixel = pSrcImage.QueryData() + pSrcImage.BlockOffset( iX, iY );
+
+        // The Euclidean distance from the true sample point drives the weight.
+        FLOAT32 fXDelta = (FLOAT32) fX - iX;
+        FLOAT32 fYDelta = (FLOAT32) fY - iY;
+        FLOAT32 fDistance = sqrtf( fXDelta * fXDelta + fYDelta * fYDelta );
+        FLOAT32 fWeight = vnBicubicWeight( fCoeffB, fCoeffC, (FLOAT32) fDistance );
+
+        vnConvertToBlock( pSrcPixel, pSrcImage.QueryFormat(), &gTempBlock );
+
+        if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+        {
+            gTotalBlock.fChannelData[0] += fWeight * gTempBlock.fChannelData[0];
+            gTotalBlock.fChannelData[1] += fWeight * gTempBlock.fChannelData[1];
+            gTotalBlock.fChannelData[2] += fWeight * gTempBlock.fChannelData[2];
+            gTotalBlock.fChannelData[3] += fWeight * gTempBlock.fChannelData[3];
+        }
+        else
+        {
+            gTotalBlock.iChannelData[0] += fWeight * gTempBlock.iChannelData[0];
+            gTotalBlock.iChannelData[1] += fWeight * gTempBlock.iChannelData[1];
+            gTotalBlock.iChannelData[2] += fWeight * gTempBlock.iChannelData[2];
+            gTotalBlock.iChannelData[3] += fWeight * gTempBlock.iChannelData[3];
+        }
+
+        fSampleCount += fWeight;
+    }
+
+    //
+    // Normalize our bicubic sum back to the valid pixel range
+    //
+
+    // NOTE(review): if every tap was skipped (sample entirely outside the
+    // image) fSampleCount is zero and this divides by zero -- presumably the
+    // callers only sample within the source bounds; verify at the call sites.
+    FLOAT32 fScaleFactor = 1.0f / fSampleCount;
+
+    // gTempBlock holds the precision / channel layout of the last converted
+    // source pixel, which is uniform across the image.
+    gTotalBlock.uiPrecision = gTempBlock.uiPrecision;
+    gTotalBlock.uiChannelCount = gTempBlock.uiChannelCount;
+
+    if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+    {
+        gTotalBlock.fChannelData[0] = fScaleFactor * gTotalBlock.fChannelData[0];
+        gTotalBlock.fChannelData[1] = fScaleFactor * gTotalBlock.fChannelData[1];
+        gTotalBlock.fChannelData[2] = fScaleFactor * gTotalBlock.fChannelData[2];
+        gTotalBlock.fChannelData[3] = fScaleFactor * gTotalBlock.fChannelData[3];
+    }
+    else
+    {
+        gTotalBlock.iChannelData[0] = fScaleFactor * gTotalBlock.iChannelData[0];
+        gTotalBlock.iChannelData[1] = fScaleFactor * gTotalBlock.iChannelData[1];
+        gTotalBlock.iChannelData[2] = fScaleFactor * gTotalBlock.iChannelData[2];
+        gTotalBlock.iChannelData[3] = fScaleFactor * gTotalBlock.iChannelData[3];
+    }
+
+    //
+    // Write our weighted sum to our output
+    //
+
+    vnConvertFromBlock( gTotalBlock, pSrcImage.QueryFormat(), pRawOutput );
+
+    return VN_SUCCESS;
+}
+
+// Performs a vertical-only (separable) bicubic sample of pSrcImage at
+// (fX, fY), weighting each of the four nearest vertical taps with the
+// Mitchell-Netravali cubic parameterized by (fCoeffB, fCoeffC). The
+// normalized result is written to pRawOutput in the source pixel format.
+VN_STATUS vnSampleBicubicVertical( CONST CVImage & pSrcImage, FLOAT32 fCoeffB, FLOAT32 fCoeffC, FLOAT32 fX, FLOAT32 fY, UINT8 * pRawOutput )
+{
+    if ( VN_PARAM_CHECK )
+    {
+        if ( !pRawOutput || !VN_IS_IMAGE_VALID(pSrcImage) )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    FLOAT32 fSampleCount = 0;
+    VN_PIXEL_BLOCK gTotalBlock = {0};
+    VN_PIXEL_BLOCK gTempBlock = {0};
+
+    //
+    // Scan the kernel space adding up the bicubic weights and pixel values
+    //
+
+    // Visit the four nearest vertical taps, offsets [-1, 2] from the truncated
+    // coordinate -- matching the combined 2D kernel above. (The previous range
+    // [-2, 1] spent a tap at offset -2, whose distance is always >= 2 and thus
+    // weighs zero, while dropping the contributing tap at offset +2.)
+    for ( INT32 j = -2 + 1; j <= 2; j++ )
+    {
+        INT32 iX = (INT32) fX;
+        INT32 iY = (INT32) fY + j;
+
+        // Taps outside the image are skipped; border pixels use fewer taps.
+        if ( iX < 0 || iY < 0 ||
+             iX > pSrcImage.QueryWidth() - 1 ||
+             iY > pSrcImage.QueryHeight() - 1 )
+        {
+            continue;
+        }
+
+        UINT8 * pSrcPixel = pSrcImage.QueryData() + pSrcImage.BlockOffset( iX, iY );
+
+        FLOAT32 fYDelta = (FLOAT32) fY - iY;
+        FLOAT32 fDistance = fabs( fYDelta );
+        FLOAT32 fWeight = vnBicubicWeight( fCoeffB, fCoeffC, (FLOAT32) fDistance );
+
+        vnConvertToBlock( pSrcPixel, pSrcImage.QueryFormat(), &gTempBlock );
+
+        if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+        {
+            gTotalBlock.fChannelData[0] += fWeight * gTempBlock.fChannelData[0];
+            gTotalBlock.fChannelData[1] += fWeight * gTempBlock.fChannelData[1];
+            gTotalBlock.fChannelData[2] += fWeight * gTempBlock.fChannelData[2];
+            gTotalBlock.fChannelData[3] += fWeight * gTempBlock.fChannelData[3];
+        }
+        else
+        {
+            gTotalBlock.iChannelData[0] += fWeight * gTempBlock.iChannelData[0];
+            gTotalBlock.iChannelData[1] += fWeight * gTempBlock.iChannelData[1];
+            gTotalBlock.iChannelData[2] += fWeight * gTempBlock.iChannelData[2];
+            gTotalBlock.iChannelData[3] += fWeight * gTempBlock.iChannelData[3];
+        }
+
+        fSampleCount += fWeight;
+    }
+
+    //
+    // Normalize our bicubic sum back to the valid pixel range
+    //
+
+    // NOTE(review): fSampleCount is zero if every tap fell outside the image,
+    // making this a divide-by-zero -- presumably callers sample in bounds.
+    FLOAT32 fScaleFactor = 1.0f / fSampleCount;
+
+    gTotalBlock.uiPrecision = gTempBlock.uiPrecision;
+    gTotalBlock.uiChannelCount = gTempBlock.uiChannelCount;
+
+    if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+    {
+        gTotalBlock.fChannelData[0] = fScaleFactor * gTotalBlock.fChannelData[0];
+        gTotalBlock.fChannelData[1] = fScaleFactor * gTotalBlock.fChannelData[1];
+        gTotalBlock.fChannelData[2] = fScaleFactor * gTotalBlock.fChannelData[2];
+        gTotalBlock.fChannelData[3] = fScaleFactor * gTotalBlock.fChannelData[3];
+    }
+    else
+    {
+        gTotalBlock.iChannelData[0] = fScaleFactor * gTotalBlock.iChannelData[0];
+        gTotalBlock.iChannelData[1] = fScaleFactor * gTotalBlock.iChannelData[1];
+        gTotalBlock.iChannelData[2] = fScaleFactor * gTotalBlock.iChannelData[2];
+        gTotalBlock.iChannelData[3] = fScaleFactor * gTotalBlock.iChannelData[3];
+    }
+
+    //
+    // Write our weighted sum to our output
+    //
+
+    vnConvertFromBlock( gTotalBlock, pSrcImage.QueryFormat(), pRawOutput );
+
+    return VN_SUCCESS;
+}
+
+// Performs a horizontal-only (separable) bicubic sample of pSrcImage at
+// (fX, fY), weighting each of the four nearest horizontal taps with the
+// Mitchell-Netravali cubic parameterized by (fCoeffB, fCoeffC). The
+// normalized result is written to pRawOutput in the source pixel format.
+VN_STATUS vnSampleBicubicHorizontal( CONST CVImage & pSrcImage, FLOAT32 fCoeffB, FLOAT32 fCoeffC, FLOAT32 fX, FLOAT32 fY, UINT8 * pRawOutput )
+{
+    if ( VN_PARAM_CHECK )
+    {
+        if ( !pRawOutput || !VN_IS_IMAGE_VALID(pSrcImage) )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    FLOAT32 fSampleCount = 0;
+    VN_PIXEL_BLOCK gTotalBlock = {0};
+    VN_PIXEL_BLOCK gTempBlock = {0};
+
+    //
+    // Scan the kernel space adding up the bicubic weights and pixel values
+    //
+
+    // Visit the four nearest horizontal taps, offsets [-1, 2] from the
+    // truncated coordinate -- matching the combined 2D kernel above. (The
+    // previous range [-2, 1] spent a tap at offset -2, whose distance is
+    // always >= 2 and thus weighs zero, while dropping the tap at offset +2.)
+    for ( INT32 i = -2 + 1; i <= 2; i++ )
+    {
+        INT32 iX = (INT32) fX + i;
+        INT32 iY = (INT32) fY;
+
+        // Taps outside the image are skipped; border pixels use fewer taps.
+        if ( iX < 0 || iY < 0 ||
+             iX > pSrcImage.QueryWidth() - 1 ||
+             iY > pSrcImage.QueryHeight() - 1 )
+        {
+            continue;
+        }
+
+        UINT8 * pSrcPixel = pSrcImage.QueryData() + pSrcImage.BlockOffset( iX, iY );
+
+        FLOAT32 fXDelta = (FLOAT32) fX - iX;
+        FLOAT32 fDistance = fabs( fXDelta );
+        FLOAT32 fWeight = vnBicubicWeight( fCoeffB, fCoeffC, (FLOAT32) fDistance );
+
+        vnConvertToBlock( pSrcPixel, pSrcImage.QueryFormat(), &gTempBlock );
+
+        if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+        {
+            gTotalBlock.fChannelData[0] += fWeight * gTempBlock.fChannelData[0];
+            gTotalBlock.fChannelData[1] += fWeight * gTempBlock.fChannelData[1];
+            gTotalBlock.fChannelData[2] += fWeight * gTempBlock.fChannelData[2];
+            gTotalBlock.fChannelData[3] += fWeight * gTempBlock.fChannelData[3];
+        }
+        else
+        {
+            gTotalBlock.iChannelData[0] += fWeight * gTempBlock.iChannelData[0];
+            gTotalBlock.iChannelData[1] += fWeight * gTempBlock.iChannelData[1];
+            gTotalBlock.iChannelData[2] += fWeight * gTempBlock.iChannelData[2];
+            gTotalBlock.iChannelData[3] += fWeight * gTempBlock.iChannelData[3];
+        }
+
+        fSampleCount += fWeight;
+    }
+
+    //
+    // Normalize our bicubic sum back to the valid pixel range
+    //
+
+    // NOTE(review): fSampleCount is zero if every tap fell outside the image,
+    // making this a divide-by-zero -- presumably callers sample in bounds.
+    FLOAT32 fScaleFactor = 1.0f / fSampleCount;
+
+    gTotalBlock.uiPrecision = gTempBlock.uiPrecision;
+    gTotalBlock.uiChannelCount = gTempBlock.uiChannelCount;
+
+    if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+    {
+        gTotalBlock.fChannelData[0] = fScaleFactor * gTotalBlock.fChannelData[0];
+        gTotalBlock.fChannelData[1] = fScaleFactor * gTotalBlock.fChannelData[1];
+        gTotalBlock.fChannelData[2] = fScaleFactor * gTotalBlock.fChannelData[2];
+        gTotalBlock.fChannelData[3] = fScaleFactor * gTotalBlock.fChannelData[3];
+    }
+    else
+    {
+        gTotalBlock.iChannelData[0] = fScaleFactor * gTotalBlock.iChannelData[0];
+        gTotalBlock.iChannelData[1] = fScaleFactor * gTotalBlock.iChannelData[1];
+        gTotalBlock.iChannelData[2] = fScaleFactor * gTotalBlock.iChannelData[2];
+        gTotalBlock.iChannelData[3] = fScaleFactor * gTotalBlock.iChannelData[3];
+    }
+
+    //
+    // Write our weighted sum to our output
+    //
+
+    vnConvertFromBlock( gTotalBlock, pSrcImage.QueryFormat(), pRawOutput );
+
+    return VN_SUCCESS;
+}
+
+// Separable bicubic entry point: dispatches to the single-axis sampler for
+// the requested direction (FALSE == horizontal, TRUE == vertical).
+VN_STATUS vnBicubicKernel( CONST CVImage & pSrcImage, FLOAT32 fCoeffB, FLOAT32 fCoeffC, FLOAT32 fX, FLOAT32 fY, BOOL bDirection, UINT8 * pRawOutput )
+{
+    if ( VN_PARAM_CHECK )
+    {
+        if ( !pRawOutput || !VN_IS_IMAGE_VALID(pSrcImage) )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    //
+    // Compute the horizontal or vertical bicubic sample at the requested pixel coordinate.
+    //
+
+    switch ( bDirection )
+    {
+        case FALSE: return vnSampleBicubicHorizontal( pSrcImage, fCoeffB, fCoeffC, fX, fY, pRawOutput );
+        case TRUE: return vnSampleBicubicVertical( pSrcImage, fCoeffB, fCoeffC, fX, fY, pRawOutput );
+    }
+
+    // Unreachable for well-formed BOOL inputs; kept as a defensive default.
+    return VN_ERROR_NOTIMPL;
+}
\ No newline at end of file
diff --git a/src/libs/image-resampler/Kernels/vnImageBicubic.h b/src/libs/image-resampler/Kernels/vnImageBicubic.h
new file mode 100644
index 000000000..6bf8861fc
--- /dev/null
+++ b/src/libs/image-resampler/Kernels/vnImageBicubic.h
@@ -0,0 +1,154 @@
+
+//
+// Copyright (c) 2002-2009 Joe Bertolami. All Right Reserved.
+//
+// vnImageBicubic.h
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Additional Information:
+//
+// For more information, visit http://www.bertolami.com.
+//
+
+#ifndef __VN_IMAGE_BICUBIC_H__
+#define __VN_IMAGE_BICUBIC_H__
+
+#include "../Base/vnBase.h"
+#include "../Base/vnMath.h"
+#include "../Base/vnImage.h"
+
+//
+// Cubic weighting function
+//
+// Source: Mitchell, Netravali, "Reconstruction Filters in Computer Graphics" 1988
+//
+// Several of the popular cubic functions used for bi-directional image filtering
+// can be generated as a simple weight function with two parameters. Thus, we use a
+// weight function to generate the majority of our bicubic kernels.
+//
+
+// Evaluates the Mitchell-Netravali cubic for a tap |fDistance| pixels from the
+// sample point. fB / fC select the family member (e.g. B=1, C=0 is the cubic
+// B-spline; B=1/3, C=1/3 is Mitchell; B=0, C=0.5 is Catmull-Rom). Returns 0
+// for distances at or beyond the 2-pixel support radius.
+inline FLOAT32 vnBicubicWeight( FLOAT32 fB, FLOAT32 fC, FLOAT32 fDistance )
+{
+    //
+    // Our bicubic function is designed to provide feedback over a radius of 2.0 pixels.
+    //
+
+    FLOAT32 fRange = fDistance;
+    FLOAT32 fResult = 0.0;
+
+    if ( fRange < 1.0 )
+    {
+        FLOAT32 fCubicTerm = ( 12.0 - 9.0 * fB - 6.0 * fC ) * ( fRange * fRange * fRange );
+        FLOAT32 fQuadTerm = ( -18.0 + 12.0 * fB + 6.0 * fC ) * ( fRange * fRange );
+        FLOAT32 fConstTerm = ( 6.0 - 2.0 * fB );
+
+        fResult = ( 1.0f / 6.0f ) * ( fCubicTerm + fQuadTerm + fConstTerm );
+    }
+
+    else if ( fRange >= 1.0 && fRange < 2.0 )
+    {
+        FLOAT32 fCubicTerm = ( -fB - 6.0 * fC ) * ( fRange * fRange * fRange );
+        FLOAT32 fQuadTerm = ( 6.0 * fB + 30.0 * fC ) * ( fRange * fRange );
+        FLOAT32 fLinTerm = ( -12.0 * fB - 48.0 * fC ) * ( fRange );
+        FLOAT32 fConstTerm = ( 8.0 * fB + 24.0 * fC );
+
+        fResult = ( 1.0f / 6.0f ) * ( fCubicTerm + fQuadTerm + fLinTerm + fConstTerm );
+    }
+
+    // NOTE(review): clamping the negative lobes to zero deviates from the true
+    // Mitchell-Netravali curve (it discards the sharpening lobes, e.g. for
+    // C > 0); presumably intentional to avoid ringing -- confirm before
+    // relying on exact filter output.
+    if ( fResult < 0 ) fResult = 0.0;
+
+    return fResult;
+}
+
+//
+// Bicubic Kernel
+//
+// BicubicKernel performs a simple combined 2D bicubic sampling of a source image.
+// Bicubic filters are separable, but this function performs a non-separated kernel
+// that facilitates non-uniform sampling filters.
+//
+// Parameters:
+//
+// pSrcImage: The read-only source image to filter.
+//
+// fCoeffB: The alpha mitchell-netravali coefficient
+//
+// fCoeffC: The beta mitchell-netravali coefficient
+//
+// fX, fY: Specifies the coordinates to sample in the source image.
+//
+// pRawOutput: A pointer to a pixel buffer. Upon return, this buffer will contain the
+// sampled result of the operation.
+//
+// Supported Image Formats:
+//
+// All image formats are supported.
+//
+
+VN_STATUS vnBicubicKernel( CONST CVImage & pSrcImage,
+ FLOAT32 fCoeffB,
+ FLOAT32 fCoeffC,
+ FLOAT32 fX,
+ FLOAT32 fY,
+ UINT8 * pRawOutput );
+
+//
+// Bicubic Kernel
+//
+// BicubicKernel performs a simple horizontal or vertical bicubic sampling of a
+// source image.
+//
+// (!) Note: bicubic filters are separable. This interface may be used to improve
+// operator performance by sampling the horizontal and vertical directions
+// separately.
+//
+// Parameters:
+//
+// pSrcImage: The read-only source image to filter.
+//
+// fCoeffB: The alpha mitchell-netravali coefficient
+//
+// fCoeffC: The beta mitchell-netravali coefficient
+//
+// fX, fY: Specifies the coordinates to sample in the source image.
+//
+// bDirection: Specifies the direction to sample. FALSE indicates horizontal sampling,
+// while TRUE indicates vertical.
+//
+// pRawOutput: a pointer to a pixel buffer. Upon return, this buffer will contain the
+// sampled result of the operation.
+//
+// Supported Image Formats:
+//
+// All image formats are supported.
+//
+
+VN_STATUS vnBicubicKernel( CONST CVImage & pSrcImage,
+ FLOAT32 fCoeffB,
+ FLOAT32 fCoeffC,
+ FLOAT32 fX,
+ FLOAT32 fY,
+ BOOL bDirection,
+ UINT8 * pRawOutput );
+
+#endif // __VN_IMAGE_BICUBIC_H__
\ No newline at end of file
diff --git a/src/libs/image-resampler/Kernels/vnImageBilinear.cpp b/src/libs/image-resampler/Kernels/vnImageBilinear.cpp
new file mode 100644
index 000000000..b25cd6a9a
--- /dev/null
+++ b/src/libs/image-resampler/Kernels/vnImageBilinear.cpp
@@ -0,0 +1,273 @@
+
+#include "vnImageBilinear.h"
+
+#include "../Utilities/vnImageBlock.h"
+
+// Linearly interpolates the channel data of two pixel blocks: fDelta == 0
+// yields blockA, fDelta == 1 yields blockB. Both blocks must share the same
+// precision (mixed-precision blends return VN_ERROR_INVALID_RESOURCE).
+// Only the channel values of pOutBlock are written here -- the caller is
+// responsible for filling in its precision and channel count.
+VN_STATUS vnLerpBlocks( CONST VN_PIXEL_BLOCK & blockA, CONST VN_PIXEL_BLOCK & blockB, FLOAT32 fDelta, VN_PIXEL_BLOCK * pOutBlock )
+{
+    if ( VN_PARAM_CHECK )
+    {
+        if ( !pOutBlock )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    if ( blockA.uiPrecision != blockB.uiPrecision )
+    {
+        return vnPostError( VN_ERROR_INVALID_RESOURCE );
+    }
+
+    if ( VN_IMAGE_PRECISION_FLOAT == blockA.uiPrecision )
+    {
+        pOutBlock->fChannelData[0] = blockA.fChannelData[0] * ( 1.0 - fDelta ) + blockB.fChannelData[0] * fDelta;
+        pOutBlock->fChannelData[1] = blockA.fChannelData[1] * ( 1.0 - fDelta ) + blockB.fChannelData[1] * fDelta;
+        pOutBlock->fChannelData[2] = blockA.fChannelData[2] * ( 1.0 - fDelta ) + blockB.fChannelData[2] * fDelta;
+        pOutBlock->fChannelData[3] = blockA.fChannelData[3] * ( 1.0 - fDelta ) + blockB.fChannelData[3] * fDelta;
+    }
+    else
+    {
+        // Integer channel data is blended in double precision, then truncated
+        // back to the integer representation.
+        pOutBlock->iChannelData[0] = (UINT64) ( (FLOAT64) blockA.iChannelData[0] * ( 1.0 - fDelta ) + blockB.iChannelData[0] * fDelta );
+        pOutBlock->iChannelData[1] = (UINT64) ( (FLOAT64) blockA.iChannelData[1] * ( 1.0 - fDelta ) + blockB.iChannelData[1] * fDelta );
+        pOutBlock->iChannelData[2] = (UINT64) ( (FLOAT64) blockA.iChannelData[2] * ( 1.0 - fDelta ) + blockB.iChannelData[2] * fDelta );
+        pOutBlock->iChannelData[3] = (UINT64) ( (FLOAT64) blockA.iChannelData[3] * ( 1.0 - fDelta ) + blockB.iChannelData[3] * fDelta );
+    }
+
+    return VN_SUCCESS;
+}
+
+// Performs a combined 2D bilinear sample of pSrcImage at (fX, fY): the two
+// nearest pixels on the top and bottom rows are each blended horizontally,
+// then the two row results are blended vertically. The filtered pixel is
+// written to pRawOutput in the source pixel format. Edge samples are clamped
+// to the image bounds.
+VN_STATUS vnBilinearKernel( CONST CVImage & pSrcImage, FLOAT32 fX, FLOAT32 fY, UINT8 * pRawOutput )
+{
+    if ( VN_PARAM_CHECK )
+    {
+        if ( fX < 0 || fY < 0 || !VN_IS_IMAGE_VALID(pSrcImage) || !pRawOutput )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    //
+    // Initially we perform a horizontal bilinear kernel on our top row of source pixels
+    //
+
+    VN_PIXEL_BLOCK biBlocks[2] = {0};
+    VN_PIXEL_BLOCK biTotalBlockA = {0};
+    VN_PIXEL_BLOCK biTotalBlockB = {0};
+    VN_PIXEL_BLOCK biTotalBlockOut = {0};
+
+    //
+    // We do not bias our float coordinate by 0.5 because we wish
+    // to sample using the nearest 2 pixels to our coordinate.
+    //
+
+    // Truncate to the top-left pixel of the 2x2 footprint; the fractional
+    // remainders below become the interpolation parameters.
+    INT32 iSampleX = fX;
+    INT32 iSampleY = fY;
+
+    FLOAT32 fXDelta = (FLOAT32) fX - iSampleX;
+    FLOAT32 fYDelta = (FLOAT32) fY - iSampleY;
+
+    for ( UINT32 i = 0; i < 2; i++ )
+    {
+        INT32 iSourceX = vnClipRange( iSampleX + i, 0, pSrcImage.QueryWidth() - 1 );
+        INT32 iSourceY = vnClipRange( iSampleY, 0, pSrcImage.QueryHeight() - 1 );
+
+        UINT8 * pSrcPixel = pSrcImage.QueryData() + pSrcImage.BlockOffset( iSourceX, iSourceY );
+
+        vnConvertToBlock( pSrcPixel, pSrcImage.QueryFormat(), &biBlocks[ i ] );
+    }
+
+    biTotalBlockA.uiPrecision = biBlocks[0].uiPrecision;
+    biTotalBlockA.uiChannelCount = biBlocks[0].uiChannelCount;
+
+    if ( VN_FAILED( vnLerpBlocks( biBlocks[0], biBlocks[1], fXDelta, &biTotalBlockA ) ) )
+    {
+        return vnPostError( VN_ERROR_EXECUTION_FAILURE );
+    }
+
+    //
+    // Next we perform a horizontal bilinear kernel on our bottom row of source pixels
+    //
+
+    for ( UINT32 i = 0; i < 2; i++ )
+    {
+        INT32 iSourceX = vnClipRange( iSampleX + i, 0, pSrcImage.QueryWidth() - 1 );
+        INT32 iSourceY = vnClipRange( iSampleY + 1, 0, pSrcImage.QueryHeight() - 1 );
+
+        UINT8 * pSrcPixel = pSrcImage.QueryData() + pSrcImage.BlockOffset( iSourceX, iSourceY );
+
+        vnConvertToBlock( pSrcPixel, pSrcImage.QueryFormat(), &biBlocks[ i ] );
+    }
+
+    biTotalBlockB.uiPrecision = biBlocks[0].uiPrecision;
+    biTotalBlockB.uiChannelCount = biBlocks[0].uiChannelCount;
+
+    if ( VN_FAILED( vnLerpBlocks( biBlocks[0], biBlocks[1], fXDelta, &biTotalBlockB ) ) )
+    {
+        return vnPostError( VN_ERROR_EXECUTION_FAILURE );
+    }
+
+    //
+    // Interpolate our new pixel color using the two source blocks
+    //
+
+    if ( VN_FAILED( vnLerpBlocks( biTotalBlockA, biTotalBlockB, fYDelta, &biTotalBlockOut ) ) )
+    {
+        return vnPostError( VN_ERROR_EXECUTION_FAILURE );
+    }
+
+    //
+    // Write our filtered result out
+    //
+
+    vnConvertFromBlock( biTotalBlockOut, pSrcImage.QueryFormat(), pRawOutput );
+
+    return VN_SUCCESS;
+}
+
+// Performs a horizontal-only (separable) bilinear sample of pSrcImage at
+// (fX, fY): blends the two horizontally adjacent pixels nearest the
+// coordinate and writes the result to pRawOutput in the source pixel format.
+// Edge samples are clamped to the image bounds.
+VN_STATUS vnSampleBilinearHorizontal( CONST CVImage & pSrcImage, FLOAT32 fX, FLOAT32 fY, UINT8 * pRawOutput )
+{
+    if ( VN_PARAM_CHECK )
+    {
+        if ( fX < 0 || fY < 0 || !VN_IS_IMAGE_VALID(pSrcImage) || !pRawOutput )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    //
+    // Perform a bilinear kernel at the appointed location.
+    //
+
+    VN_PIXEL_BLOCK biBlocks[2] = {0};
+    VN_PIXEL_BLOCK biTotalBlock = {0};
+
+    //
+    // We do not bias our float coordinate by 0.5 because we wish
+    // to sample using the nearest 2 pixels to our coordinate.
+    //
+
+    INT32 iSampleX = fX;
+    INT32 iSampleY = fY;
+
+    for ( UINT32 i = 0; i < 2; i++ )
+    {
+        INT32 iSourceX = vnClipRange( iSampleX + i, 0, pSrcImage.QueryWidth() - 1 );
+        INT32 iSourceY = vnClipRange( iSampleY, 0, pSrcImage.QueryHeight() - 1 );
+
+        UINT8 * pSrcPixel = pSrcImage.QueryData() + pSrcImage.BlockOffset( iSourceX, iSourceY );
+
+        vnConvertToBlock( pSrcPixel, pSrcImage.QueryFormat(), &biBlocks[ i ] );
+    }
+
+    //
+    // Interpolate our new pixel color using the two source blocks
+    //
+
+    biTotalBlock.uiPrecision = biBlocks[0].uiPrecision;
+    biTotalBlock.uiChannelCount = biBlocks[0].uiChannelCount;
+
+    //
+    // Calculate our interpolation parameter
+    //
+
+    // Fractional part of fX: 0 selects the left pixel, 1 the right.
+    FLOAT32 fXDelta = (FLOAT32) fX - iSampleX;
+
+    if ( VN_FAILED( vnLerpBlocks( biBlocks[0], biBlocks[1], fXDelta, &biTotalBlock ) ) )
+    {
+        return vnPostError( VN_ERROR_EXECUTION_FAILURE );
+    }
+
+    //
+    // Write our filtered result out
+    //
+
+    vnConvertFromBlock( biTotalBlock, pSrcImage.QueryFormat(), pRawOutput );
+
+    return VN_SUCCESS;
+}
+
+
+// Performs a vertical-only (separable) bilinear sample of pSrcImage at
+// (fX, fY): blends the two vertically adjacent pixels nearest the coordinate
+// and writes the result to pRawOutput in the source pixel format. Edge
+// samples are clamped to the image bounds.
+VN_STATUS vnSampleBilinearVertical( CONST CVImage & pSrcImage, FLOAT32 fX, FLOAT32 fY, UINT8 * pRawOutput )
+{
+    if ( VN_PARAM_CHECK )
+    {
+        if ( fX < 0 || fY < 0 || !VN_IS_IMAGE_VALID(pSrcImage) || !pRawOutput )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    //
+    // Perform a bilinear kernel at the appointed location.
+    //
+
+    VN_PIXEL_BLOCK biBlocks[2] = {0};
+    VN_PIXEL_BLOCK biTotalBlock = {0};
+
+    //
+    // We do not bias our float coordinate by 0.5 because we wish
+    // to sample using the nearest 2 pixels to our coordinate.
+    //
+
+    INT32 iSampleX = fX;
+    INT32 iSampleY = fY;
+
+    for ( UINT32 i = 0; i < 2; i++ )
+    {
+        INT32 iSourceX = vnClipRange( iSampleX, 0, pSrcImage.QueryWidth() - 1 );
+        INT32 iSourceY = vnClipRange( iSampleY + i, 0, pSrcImage.QueryHeight() - 1 );
+
+        UINT8 * pSrcPixel = pSrcImage.QueryData() + pSrcImage.BlockOffset( iSourceX, iSourceY );
+
+        vnConvertToBlock( pSrcPixel, pSrcImage.QueryFormat(), &biBlocks[ i ] );
+    }
+
+    //
+    // Interpolate our new pixel color using the two source blocks
+    //
+
+    biTotalBlock.uiPrecision = biBlocks[0].uiPrecision;
+    biTotalBlock.uiChannelCount = biBlocks[0].uiChannelCount;
+
+    //
+    // Calculate our interpolation parameter
+    //
+
+    // Fractional part of fY: 0 selects the upper pixel, 1 the lower.
+    FLOAT32 fYDelta = (FLOAT32) fY - iSampleY;
+
+    if ( VN_FAILED( vnLerpBlocks( biBlocks[0], biBlocks[1], fYDelta, &biTotalBlock ) ) )
+    {
+        return vnPostError( VN_ERROR_EXECUTION_FAILURE );
+    }
+
+    //
+    // Write our filtered result out
+    //
+
+    vnConvertFromBlock( biTotalBlock, pSrcImage.QueryFormat(), pRawOutput );
+
+    return VN_SUCCESS;
+}
+
+// Separable bilinear entry point: dispatches to the single-axis sampler for
+// the requested direction (FALSE == horizontal, TRUE == vertical).
+VN_STATUS vnBilinearKernel( CONST CVImage & pSrcImage, FLOAT32 fX, FLOAT32 fY, BOOL bDirection, UINT8 * pRawOutput )
+{
+    if ( VN_PARAM_CHECK )
+    {
+        if ( !pRawOutput || !VN_IS_IMAGE_VALID(pSrcImage) )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    //
+    // Compute the horizontal or vertical bilinear sample at the requested pixel coordinate.
+    //
+
+    switch ( bDirection )
+    {
+        case FALSE: return vnSampleBilinearHorizontal( pSrcImage, fX, fY, pRawOutput );
+        case TRUE: return vnSampleBilinearVertical( pSrcImage, fX, fY, pRawOutput );
+    }
+
+    // Unreachable for well-formed BOOL inputs; kept as a defensive default.
+    return VN_ERROR_NOTIMPL;
+}
\ No newline at end of file
diff --git a/src/libs/image-resampler/Kernels/vnImageBilinear.h b/src/libs/image-resampler/Kernels/vnImageBilinear.h
new file mode 100644
index 000000000..fc49d5687
--- /dev/null
+++ b/src/libs/image-resampler/Kernels/vnImageBilinear.h
@@ -0,0 +1,92 @@
+
+//
+// Copyright (c) 2002-2009 Joe Bertolami. All Right Reserved.
+//
+// vnImageBilinear.h
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Additional Information:
+//
+// For more information, visit http://www.bertolami.com.
+//
+
+#ifndef __VN_BILINEAR_H__
+#define __VN_BILINEAR_H__
+
+#include "../Base/vnBase.h"
+#include "../Base/vnMath.h"
+#include "../Base/vnImage.h"
+
+//
+// Bilinear Kernel
+//
+// BilinearKernel performs a simple combined 2D bilinear sampling of a source image.
+// Bilinear filters are separable, but this function performs a non-separated kernel
+// that facilitates non-uniform sampling filters.
+//
+// Parameters:
+//
+// pSrcImage: The read-only source image to filter.
+//
+// fX, fY: Specifies the coordinates to sample in the source image.
+//
+// pRawOutput: a pointer to a pixel buffer. Upon return, this buffer will contain the
+// sampled result of the operation.
+//
+// Supported Image Formats:
+//
+// All image formats are supported.
+//
+
+VN_STATUS vnBilinearKernel( CONST CVImage & pSrcImage, FLOAT32 fX, FLOAT32 fY, UINT8 * pRawOutput );
+
+//
+// Bilinear Kernel
+//
+// BilinearKernel performs a simple horizontal or vertical bilinear sampling of a
+// source image.
+//
+// (!) Note: bilinear filters are separable. This interface may be used to improve
+// operator performance by sampling the horizontal and vertical directions
+// separately.
+//
+// Parameters:
+//
+// pSrcImage: The read-only source image to filter.
+//
+// fX, fY: Specifies the coordinates to sample in the source image.
+//
+// bDirection: Specifies the direction to sample. FALSE indicates horizontal sampling,
+// while TRUE indicates vertical.
+//
+// pRawOutput: a pointer to a pixel buffer. Upon return, this buffer will contain the
+// sampled result of the operation.
+//
+// Supported Image Formats:
+//
+// All image formats are supported.
+//
+
+VN_STATUS vnBilinearKernel( CONST CVImage & pSrcImage, FLOAT32 fX, FLOAT32 fY, BOOL bDirection, UINT8 * pRawOutput );
+
+#endif // __VN_BILINEAR_H__
\ No newline at end of file
diff --git a/src/libs/image-resampler/Kernels/vnImageCoverage.cpp b/src/libs/image-resampler/Kernels/vnImageCoverage.cpp
new file mode 100644
index 000000000..212b9c6ec
--- /dev/null
+++ b/src/libs/image-resampler/Kernels/vnImageCoverage.cpp
@@ -0,0 +1,384 @@
+
+#include "vnImageCoverage.h"
+
+#include "../Utilities/vnImageBlock.h"
+
+// Performs a combined 2D coverage (area-average) sample of pSrcImage at
+// (fX, fY) using the given kernel radius. For minification (fRadius >= 1) the
+// taps are blended with a distance-weighted average; for magnification a
+// destination pixel sufficiently close to a source pixel passes that pixel
+// through unmodified, otherwise nearby pixels are interpolated. The result is
+// written to pRawOutput in the source pixel format.
+VN_STATUS vnCoverageKernel( CONST CVImage & pSrcImage, FLOAT32 fX, FLOAT32 fY, FLOAT32 fRadius, UINT8 * pRawOutput )
+{
+    if ( VN_PARAM_CHECK )
+    {
+        if ( 0 == fRadius || !pRawOutput || !VN_IS_IMAGE_VALID(pSrcImage) )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    FLOAT32 fSampleCount = 0;
+    VN_PIXEL_BLOCK gTotalBlock = {0};
+    VN_PIXEL_BLOCK gTempBlock = {0};
+    // Smallest integer tap radius that fully covers the fractional fRadius.
+    INT32 iRadius = fRadius + 1.0f;
+
+    //
+    // Scan the kernel space adding up the pixel values
+    //
+
+    // Diagonal extent of the kernel footprint; used to normalize distances.
+    FLOAT32 fMaxDistance = sqrtf( (FLOAT32) fRadius * fRadius + fRadius * fRadius );
+
+    for ( INT32 j = -iRadius + 1; j <= iRadius; j++ )
+    for ( INT32 i = -iRadius + 1; i <= iRadius; i++ )
+    {
+        INT32 iX = (INT32) fX + i;
+        INT32 iY = (INT32) fY + j;
+
+        // Taps outside the image are skipped; border pixels use fewer taps.
+        if ( iX < 0 || iY < 0 ||
+             iX > pSrcImage.QueryWidth() - 1 ||
+             iY > pSrcImage.QueryHeight() - 1 )
+        {
+            continue;
+        }
+
+        UINT8 * pSrcPixel = pSrcImage.QueryData() + pSrcImage.BlockOffset( iX, iY );
+
+        FLOAT32 fXDelta = (FLOAT32) fX - iX;
+        FLOAT32 fYDelta = (FLOAT32) fY - iY;
+        FLOAT32 fDistance = sqrtf( fXDelta * fXDelta + fYDelta * fYDelta );
+        FLOAT32 fWeight = 0.0f;
+
+        vnConvertToBlock( pSrcPixel, pSrcImage.QueryFormat(), &gTempBlock );
+
+        //
+        // The coverage filter performs slightly differently based on whether the image is
+        // being magnified or minified. If the image is not undergoing a scaling operation,
+        // the coverage filter will pass-through the image without making any modification.
+        //
+
+        //
+        // Minification: computes a simple distance based weighted average:
+        //
+
+        if ( fRadius >= 1.0 )
+        {
+            fDistance = VN_MIN2( fMaxDistance, fDistance );
+
+            fWeight = 1.0f - fDistance / fMaxDistance;
+        }
+
+        //
+        // Magnification:
+        //
+        // If the destination pixel is significantly far away from any source pixel, then we
+        // interpolate to find a new value.
+        //
+        // If the destination pixel is sufficiently close to a source pixel, we sample it
+        // directly and pass it to the output.
+        //
+
+        else
+        {
+            if ( fDistance >= 0.5f - fRadius )
+            {
+                fWeight = 1.0f - fDistance;
+            }
+            else
+            {
+                // Close enough to a source pixel: pass it through unmodified
+                // and return immediately, abandoning the accumulation.
+                return vnConvertFromBlock( gTempBlock, pSrcImage.QueryFormat(), pRawOutput );
+            }
+        }
+
+        if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+        {
+            gTotalBlock.fChannelData[0] += fWeight * gTempBlock.fChannelData[0];
+            gTotalBlock.fChannelData[1] += fWeight * gTempBlock.fChannelData[1];
+            gTotalBlock.fChannelData[2] += fWeight * gTempBlock.fChannelData[2];
+            gTotalBlock.fChannelData[3] += fWeight * gTempBlock.fChannelData[3];
+        }
+        else
+        {
+            gTotalBlock.iChannelData[0] += fWeight * gTempBlock.iChannelData[0];
+            gTotalBlock.iChannelData[1] += fWeight * gTempBlock.iChannelData[1];
+            gTotalBlock.iChannelData[2] += fWeight * gTempBlock.iChannelData[2];
+            gTotalBlock.iChannelData[3] += fWeight * gTempBlock.iChannelData[3];
+        }
+
+        fSampleCount += fWeight;
+    }
+
+    //
+    // Normalize our simple sum back to the valid pixel range
+    //
+
+    // NOTE(review): if every tap was skipped or weighed zero, fSampleCount is
+    // zero and this divides by zero -- presumably callers sample in bounds.
+    FLOAT32 fScaleFactor = 1.0f / fSampleCount;
+
+    gTotalBlock.uiPrecision = gTempBlock.uiPrecision;
+    gTotalBlock.uiChannelCount = gTempBlock.uiChannelCount;
+
+    if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+    {
+        gTotalBlock.fChannelData[0] = fScaleFactor * gTotalBlock.fChannelData[0];
+        gTotalBlock.fChannelData[1] = fScaleFactor * gTotalBlock.fChannelData[1];
+        gTotalBlock.fChannelData[2] = fScaleFactor * gTotalBlock.fChannelData[2];
+        gTotalBlock.fChannelData[3] = fScaleFactor * gTotalBlock.fChannelData[3];
+    }
+    else
+    {
+        gTotalBlock.iChannelData[0] = fScaleFactor * gTotalBlock.iChannelData[0];
+        gTotalBlock.iChannelData[1] = fScaleFactor * gTotalBlock.iChannelData[1];
+        gTotalBlock.iChannelData[2] = fScaleFactor * gTotalBlock.iChannelData[2];
+        gTotalBlock.iChannelData[3] = fScaleFactor * gTotalBlock.iChannelData[3];
+    }
+
+    //
+    // Write our weighted sum to our output
+    //
+
+    vnConvertFromBlock( gTotalBlock, pSrcImage.QueryFormat(), pRawOutput );
+
+    return VN_SUCCESS;
+}
+
+// Performs a vertical-only (separable) coverage sample of pSrcImage at
+// (fX, fY) with the given kernel radius. Minification (fRadius >= 1) uses a
+// distance-weighted average of the vertical taps; magnification either passes
+// a sufficiently close source pixel through directly or interpolates nearby
+// pixels. The result is written to pRawOutput in the source pixel format.
+VN_STATUS vnSampleCoverageVertical( CONST CVImage & pSrcImage, FLOAT32 fX, FLOAT32 fY, FLOAT32 fRadius, UINT8 * pRawOutput )
+{
+    if ( VN_PARAM_CHECK )
+    {
+        if ( 0 == fRadius || !pRawOutput || !VN_IS_IMAGE_VALID(pSrcImage) )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    FLOAT32 fSampleCount = 0;
+    VN_PIXEL_BLOCK gTotalBlock = {0};
+    VN_PIXEL_BLOCK gTempBlock = {0};
+    // Smallest integer tap radius that fully covers the fractional fRadius.
+    INT32 iRadius = fRadius + 1.0f;
+
+    //
+    // Scan the kernel space adding up the pixel values
+    //
+
+    // Along a single axis the maximum useful distance is the radius itself.
+    FLOAT32 fMaxDistance = fRadius;
+
+    for ( INT32 j = -iRadius + 1; j <= iRadius; j++ )
+    {
+        INT32 iX = (INT32) fX;
+        INT32 iY = (INT32) fY + j;
+
+        // Taps outside the image are skipped; border pixels use fewer taps.
+        if ( iX < 0 || iY < 0 ||
+             iX > pSrcImage.QueryWidth() - 1 ||
+             iY > pSrcImage.QueryHeight() - 1 )
+        {
+            continue;
+        }
+
+        UINT8 * pSrcPixel = pSrcImage.QueryData() + pSrcImage.BlockOffset( iX, iY );
+
+        FLOAT32 fYDelta = (FLOAT32) fY - iY;
+        FLOAT32 fDistance = fabs( fYDelta );
+        FLOAT32 fWeight = 0.0f;
+
+        vnConvertToBlock( pSrcPixel, pSrcImage.QueryFormat(), &gTempBlock );
+
+        //
+        // If we're minifying, then we can compute a simple distance based weighted average
+        // using our calculated radius (fRadius)
+        //
+
+        if ( fRadius >= 1.0 )
+        {
+            fDistance = VN_MIN2( fMaxDistance, fDistance );
+            fWeight = 1.0f - fDistance / fMaxDistance;
+        }
+        else
+        {
+            if ( fDistance >= 0.5f - fRadius )
+            {
+                fWeight = 1.0f - fDistance;
+            }
+            else
+            {
+                // Magnification with a close source pixel: pass it through
+                // unmodified and return immediately.
+                return vnConvertFromBlock( gTempBlock, pSrcImage.QueryFormat(), pRawOutput );
+            }
+        }
+
+        if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+        {
+            gTotalBlock.fChannelData[0] += fWeight * gTempBlock.fChannelData[0];
+            gTotalBlock.fChannelData[1] += fWeight * gTempBlock.fChannelData[1];
+            gTotalBlock.fChannelData[2] += fWeight * gTempBlock.fChannelData[2];
+            gTotalBlock.fChannelData[3] += fWeight * gTempBlock.fChannelData[3];
+        }
+        else
+        {
+            gTotalBlock.iChannelData[0] += fWeight * gTempBlock.iChannelData[0];
+            gTotalBlock.iChannelData[1] += fWeight * gTempBlock.iChannelData[1];
+            gTotalBlock.iChannelData[2] += fWeight * gTempBlock.iChannelData[2];
+            gTotalBlock.iChannelData[3] += fWeight * gTempBlock.iChannelData[3];
+        }
+
+        fSampleCount += fWeight;
+    }
+
+    //
+    // Normalize our sum back to the valid pixel range
+    //
+
+    // NOTE(review): if every tap was skipped or weighed zero, fSampleCount is
+    // zero and this divides by zero -- presumably callers sample in bounds.
+    FLOAT32 fScaleFactor = 1.0f / fSampleCount;
+
+    gTotalBlock.uiPrecision = gTempBlock.uiPrecision;
+    gTotalBlock.uiChannelCount = gTempBlock.uiChannelCount;
+
+    if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+    {
+        gTotalBlock.fChannelData[0] = fScaleFactor * gTotalBlock.fChannelData[0];
+        gTotalBlock.fChannelData[1] = fScaleFactor * gTotalBlock.fChannelData[1];
+        gTotalBlock.fChannelData[2] = fScaleFactor * gTotalBlock.fChannelData[2];
+        gTotalBlock.fChannelData[3] = fScaleFactor * gTotalBlock.fChannelData[3];
+    }
+    else
+    {
+        gTotalBlock.iChannelData[0] = fScaleFactor * gTotalBlock.iChannelData[0];
+        gTotalBlock.iChannelData[1] = fScaleFactor * gTotalBlock.iChannelData[1];
+        gTotalBlock.iChannelData[2] = fScaleFactor * gTotalBlock.iChannelData[2];
+        gTotalBlock.iChannelData[3] = fScaleFactor * gTotalBlock.iChannelData[3];
+    }
+
+    //
+    // Write our weighted sum to our output
+    //
+
+    vnConvertFromBlock( gTotalBlock, pSrcImage.QueryFormat(), pRawOutput );
+
+    return VN_SUCCESS;
+}
+
+//
+// vnSampleCoverageHorizontal
+//
+// Horizontal twin of vnSampleCoverageVertical: coverage-weighted average of
+// source pixels in a 1D horizontal window centered at (fX, fY), written to
+// pRawOutput.
+//
+VN_STATUS vnSampleCoverageHorizontal( CONST CVImage & pSrcImage, FLOAT32 fX, FLOAT32 fY, FLOAT32 fRadius, UINT8 * pRawOutput )
+{
+    if ( VN_PARAM_CHECK )
+    {
+        // NOTE(review): exact float compare -- only a literal 0.0f radius is rejected.
+        if ( 0 == fRadius || !pRawOutput || !VN_IS_IMAGE_VALID(pSrcImage) )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    FLOAT32 fSampleCount = 0;
+    VN_PIXEL_BLOCK gTotalBlock = {0};
+    VN_PIXEL_BLOCK gTempBlock = {0};
+    INT32 iRadius = fRadius + 1.0f;    // truncating float -> int conversion
+
+    //
+    // Scan the kernel space adding up the pixel values
+    //
+
+    FLOAT32 fMaxDistance = fRadius;
+
+    for ( INT32 i = -iRadius + 1; i <= iRadius; i++ )
+    {
+        INT32 iX = (INT32) fX + i;
+        INT32 iY = (INT32) fY;
+
+        // Taps outside the image are skipped entirely (no edge clamping).
+        if ( iX < 0 || iY < 0 ||
+             iX > pSrcImage.QueryWidth() - 1 ||
+             iY > pSrcImage.QueryHeight() - 1 )
+        {
+            continue;
+        }
+
+        UINT8 * pSrcPixel = pSrcImage.QueryData() + pSrcImage.BlockOffset( iX, iY );
+
+        FLOAT32 fXDelta = (FLOAT32) fX - iX;
+        FLOAT32 fDistance = fabs( fXDelta );
+        FLOAT32 fWeight = 0.0f;
+
+        vnConvertToBlock( pSrcPixel, pSrcImage.QueryFormat(), &gTempBlock );
+
+        // Minification: simple distance-weighted (tent) average over fRadius.
+        if ( fRadius >= 1.0 )
+        {
+            fDistance = VN_MIN2( fMaxDistance, fDistance );
+            fWeight = 1.0f - fDistance / fMaxDistance;
+        }
+
+        else
+        {
+            if ( fDistance >= 0.5f - fRadius )
+            {
+                fWeight = 1.0f - fDistance;
+            }
+            else
+            {
+                // Magnification case where a single source pixel fully covers the
+                // sample point: return that pixel directly, bypassing the average.
+                return vnConvertFromBlock( gTempBlock, pSrcImage.QueryFormat(), pRawOutput );
+            }
+        }
+
+        if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+        {
+            gTotalBlock.fChannelData[0] += fWeight * gTempBlock.fChannelData[0];
+            gTotalBlock.fChannelData[1] += fWeight * gTempBlock.fChannelData[1];
+            gTotalBlock.fChannelData[2] += fWeight * gTempBlock.fChannelData[2];
+            gTotalBlock.fChannelData[3] += fWeight * gTempBlock.fChannelData[3];
+        }
+        else
+        {
+            gTotalBlock.iChannelData[0] += fWeight * gTempBlock.iChannelData[0];
+            gTotalBlock.iChannelData[1] += fWeight * gTempBlock.iChannelData[1];
+            gTotalBlock.iChannelData[2] += fWeight * gTempBlock.iChannelData[2];
+            gTotalBlock.iChannelData[3] += fWeight * gTempBlock.iChannelData[3];
+        }
+
+        fSampleCount += fWeight;
+    }
+
+    //
+    // Normalize our sum back to the valid pixel range
+    //
+
+    // NOTE(review): fSampleCount may be 0 if all taps were out of bounds,
+    // making this divide produce inf/NaN -- confirm callers sample in bounds.
+    FLOAT32 fScaleFactor = 1.0f / fSampleCount;
+
+    // Precision/channel layout is taken from the last converted sample block.
+    gTotalBlock.uiPrecision = gTempBlock.uiPrecision;
+    gTotalBlock.uiChannelCount = gTempBlock.uiChannelCount;
+
+    if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+    {
+        gTotalBlock.fChannelData[0] = fScaleFactor * gTotalBlock.fChannelData[0];
+        gTotalBlock.fChannelData[1] = fScaleFactor * gTotalBlock.fChannelData[1];
+        gTotalBlock.fChannelData[2] = fScaleFactor * gTotalBlock.fChannelData[2];
+        gTotalBlock.fChannelData[3] = fScaleFactor * gTotalBlock.fChannelData[3];
+    }
+    else
+    {
+        gTotalBlock.iChannelData[0] = fScaleFactor * gTotalBlock.iChannelData[0];
+        gTotalBlock.iChannelData[1] = fScaleFactor * gTotalBlock.iChannelData[1];
+        gTotalBlock.iChannelData[2] = fScaleFactor * gTotalBlock.iChannelData[2];
+        gTotalBlock.iChannelData[3] = fScaleFactor * gTotalBlock.iChannelData[3];
+    }
+
+    //
+    // Write our weighted sum to our output
+    //
+
+    vnConvertFromBlock( gTotalBlock, pSrcImage.QueryFormat(), pRawOutput );
+
+    return VN_SUCCESS;
+}
+
+//
+// vnCoverageKernel (separable variant)
+//
+// Dispatches a single-axis coverage sample at (fX, fY):
+// bDirection == FALSE -> horizontal pass, TRUE -> vertical pass.
+//
+VN_STATUS vnCoverageKernel( CONST CVImage & pSrcImage, FLOAT32 fX, FLOAT32 fY, BOOL bDirection, FLOAT32 fRadius, UINT8 * pRawOutput )
+{
+    if ( VN_PARAM_CHECK )
+    {
+        if ( 0 == fRadius || !pRawOutput || !VN_IS_IMAGE_VALID(pSrcImage) )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    //
+    // Compute the horizontal or vertical average sample at the requested pixel coordinate.
+    //
+
+    switch ( bDirection )
+    {
+        case FALSE: return vnSampleCoverageHorizontal( pSrcImage, fX, fY, fRadius, pRawOutput );
+        case TRUE: return vnSampleCoverageVertical( pSrcImage, fX, fY, fRadius, pRawOutput );
+    }
+
+    // Unreachable for well-formed BOOL values.
+    return VN_ERROR_NOTIMPL;
+}
\ No newline at end of file
diff --git a/src/libs/image-resampler/Kernels/vnImageCoverage.h b/src/libs/image-resampler/Kernels/vnImageCoverage.h
new file mode 100644
index 000000000..f3442bef8
--- /dev/null
+++ b/src/libs/image-resampler/Kernels/vnImageCoverage.h
@@ -0,0 +1,94 @@
+
+//
+// Copyright (c) 2002-2009 Joe Bertolami. All Right Reserved.
+//
+// vnImageCoverage.h
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Additional Information:
+//
+// For more information, visit http://www.bertolami.com.
+//
+
+#ifndef __VN_IMAGE_COVERAGE_H__
+#define __VN_IMAGE_COVERAGE_H__
+
+#include "../Base/vnBase.h"
+#include "../Base/vnMath.h"
+#include "../Base/vnImage.h"
+
+//
+// Coverage Kernel
+//
+// CoverageKernel performs a simple 2D coverage sampling of a source image.
+//
+// Parameters:
+//
+// pSrcImage: The read-only source image to filter.
+//
+// fX, fY: Specifies the coordinates to sample in the source image.
+//
+// fRadius: The radius to sample with the average kernel.
+//
+// pRawOutput: a pointer to a pixel buffer. Upon return, this buffer will contain the
+// sampled result of the operation.
+//
+// Supported Image Formats:
+//
+// All image formats are supported.
+//
+
+VN_STATUS vnCoverageKernel( CONST CVImage & pSrcImage, FLOAT32 fX, FLOAT32 fY, FLOAT32 fRadius, UINT8 * pRawOutput );
+
+//
+// Coverage Kernel
+//
+// CoverageKernel performs a simple horizontal or vertical coverage sampling of a
+// source image.
+//
+// (!) Note: coverage filters are separable. This interface may be used to improve
+// operator performance by sampling the horizontal and vertical directions
+// separately.
+//
+// Parameters:
+//
+// pSrcImage: The read-only source image to filter.
+//
+// fX, fY: Specifies the coordinates to sample in the source image.
+//
+// bDirection: Specifies the direction to sample. FALSE indicates horizontal sampling,
+// while TRUE indicates vertical.
+//
+// fRadius: The radius to sample with the average kernel.
+//
+// pRawOutput: a pointer to a pixel buffer. Upon return, this buffer will contain the
+// sampled result of the operation.
+//
+// Supported Image Formats:
+//
+// All image formats are supported.
+//
+
+VN_STATUS vnCoverageKernel( CONST CVImage & pSrcImage, FLOAT32 fX, FLOAT32 fY, BOOL bDirection, FLOAT32 fRadius, UINT8 * pRawOutput );
+
+#endif // __VN_IMAGE_COVERAGE_H__
diff --git a/src/libs/image-resampler/Kernels/vnImageGaussian.cpp b/src/libs/image-resampler/Kernels/vnImageGaussian.cpp
new file mode 100644
index 000000000..ea906216e
--- /dev/null
+++ b/src/libs/image-resampler/Kernels/vnImageGaussian.cpp
@@ -0,0 +1,305 @@
+
+#include "vnImageGaussian.h"
+
+#include "../Utilities/vnImageBlock.h"
+
+//
+// vnGaussianKernel (2D variant)
+//
+// Computes a non-separated 2D gaussian-weighted average of source pixels in
+// a square window centered at (fX, fY) and writes the converted result to
+// pRawOutput.
+//
+VN_STATUS vnGaussianKernel( CONST CVImage & pSrcImage, FLOAT32 fX, FLOAT32 fY, FLOAT32 fRadius, UINT8 * pRawOutput )
+{
+    if ( VN_PARAM_CHECK )
+    {
+        // NOTE(review): exact float compare -- only a literal 0.0f radius is rejected.
+        if ( 0 == fRadius || !pRawOutput || !VN_IS_IMAGE_VALID(pSrcImage) )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    FLOAT32 fSampleCount = 0;
+    VN_PIXEL_BLOCK gTotalBlock = {0};
+    VN_PIXEL_BLOCK gTempBlock = {0};
+    INT32 iRadius = fRadius + 1.0f;    // truncating float -> int conversion
+
+    //
+    // Scan the kernel space adding up the gaussian weights and pixel values
+    //
+
+    // Normalization distance: the diagonal of the kernel window, i.e. iRadius * sqrt(2).
+    FLOAT32 fMaxDistance = sqrtf( (FLOAT32) iRadius * iRadius + iRadius * iRadius );
+
+    for ( INT32 j = -iRadius; j <= iRadius; j++ )
+    for ( INT32 i = -iRadius; i <= iRadius; i++ )
+    {
+        INT32 iX = (INT32) fX + i;
+        INT32 iY = (INT32) fY + j;
+
+        // Taps outside the image are skipped entirely (no edge clamping).
+        if ( iX < 0 || iY < 0 ||
+             iX > pSrcImage.QueryWidth() - 1 ||
+             iY > pSrcImage.QueryHeight() - 1 )
+        {
+            continue;
+        }
+
+        UINT8 * pSrcPixel = pSrcImage.QueryData() + pSrcImage.BlockOffset( iX, iY );
+
+        FLOAT32 fXDelta = (FLOAT32) fX - iX;
+        FLOAT32 fYDelta = (FLOAT32) fY - iY;
+        FLOAT32 fDistance = sqrtf( fXDelta * fXDelta + fYDelta * fYDelta );
+        FLOAT32 fWeight = vnGaussianWeight( (FLOAT32) fDistance, fMaxDistance );
+
+        vnConvertToBlock( pSrcPixel, pSrcImage.QueryFormat(), &gTempBlock );
+
+        if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+        {
+            gTotalBlock.fChannelData[0] += fWeight * gTempBlock.fChannelData[0];
+            gTotalBlock.fChannelData[1] += fWeight * gTempBlock.fChannelData[1];
+            gTotalBlock.fChannelData[2] += fWeight * gTempBlock.fChannelData[2];
+            gTotalBlock.fChannelData[3] += fWeight * gTempBlock.fChannelData[3];
+        }
+        else
+        {
+            gTotalBlock.iChannelData[0] += fWeight * gTempBlock.iChannelData[0];
+            gTotalBlock.iChannelData[1] += fWeight * gTempBlock.iChannelData[1];
+            gTotalBlock.iChannelData[2] += fWeight * gTempBlock.iChannelData[2];
+            gTotalBlock.iChannelData[3] += fWeight * gTempBlock.iChannelData[3];
+        }
+
+        fSampleCount += fWeight;
+    }
+
+    //
+    // Normalize our gaussian sum back to the valid pixel range
+    //
+
+    // NOTE(review): fSampleCount may be 0 if all taps were out of bounds,
+    // making this divide produce inf/NaN -- confirm callers sample in bounds.
+    FLOAT32 fScaleFactor = 1.0f / fSampleCount;
+
+    // Precision/channel layout is taken from the last converted sample block.
+    gTotalBlock.uiPrecision = gTempBlock.uiPrecision;
+    gTotalBlock.uiChannelCount = gTempBlock.uiChannelCount;
+
+    if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+    {
+        gTotalBlock.fChannelData[0] = fScaleFactor * gTotalBlock.fChannelData[0];
+        gTotalBlock.fChannelData[1] = fScaleFactor * gTotalBlock.fChannelData[1];
+        gTotalBlock.fChannelData[2] = fScaleFactor * gTotalBlock.fChannelData[2];
+        gTotalBlock.fChannelData[3] = fScaleFactor * gTotalBlock.fChannelData[3];
+    }
+    else
+    {
+        gTotalBlock.iChannelData[0] = fScaleFactor * gTotalBlock.iChannelData[0];
+        gTotalBlock.iChannelData[1] = fScaleFactor * gTotalBlock.iChannelData[1];
+        gTotalBlock.iChannelData[2] = fScaleFactor * gTotalBlock.iChannelData[2];
+        gTotalBlock.iChannelData[3] = fScaleFactor * gTotalBlock.iChannelData[3];
+    }
+
+    //
+    // Write our weighted sum to our output
+    //
+
+    vnConvertFromBlock( gTotalBlock, pSrcImage.QueryFormat(), pRawOutput );
+
+    return VN_SUCCESS;
+}
+
+//
+// vnSampleGaussianVertical
+//
+// Computes a gaussian-weighted average of source pixels in a 1D vertical
+// window centered at (fX, fY); the vertical pass of the separable gaussian
+// filter. Result is converted and written to pRawOutput.
+//
+VN_STATUS vnSampleGaussianVertical( CONST CVImage & pSrcImage, FLOAT32 fX, FLOAT32 fY, FLOAT32 fRadius, UINT8 * pRawOutput )
+{
+    if ( VN_PARAM_CHECK )
+    {
+        // NOTE(review): exact float compare -- only a literal 0.0f radius is rejected.
+        if ( 0 == fRadius || !pRawOutput || !VN_IS_IMAGE_VALID(pSrcImage) )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    FLOAT32 fSampleCount = 0;
+    VN_PIXEL_BLOCK gTotalBlock = {0};
+    VN_PIXEL_BLOCK gTempBlock = {0};
+    INT32 iRadius = fRadius + 1.0f;    // truncating float -> int conversion
+
+    //
+    // Scan the kernel space adding up the gaussian weights and pixel values
+    //
+
+    FLOAT32 fMaxDistance = fRadius + 1.0f;
+
+    for ( INT32 j = -iRadius; j <= iRadius; j++ )
+    {
+        INT32 iX = (INT32) fX;
+        INT32 iY = (INT32) fY + j;
+
+        // Taps outside the image are skipped entirely (no edge clamping).
+        if ( iX < 0 || iY < 0 ||
+             iX > pSrcImage.QueryWidth() - 1 ||
+             iY > pSrcImage.QueryHeight() - 1 )
+        {
+            continue;
+        }
+
+        UINT8 * pSrcPixel = pSrcImage.QueryData() + pSrcImage.BlockOffset( iX, iY );
+
+        FLOAT32 fYDelta = (FLOAT32) fY - iY;
+        FLOAT32 fDistance = fabs( fYDelta );
+        FLOAT32 fWeight = vnGaussianWeight( (FLOAT32) fDistance, fMaxDistance );
+
+        vnConvertToBlock( pSrcPixel, pSrcImage.QueryFormat(), &gTempBlock );
+
+        if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+        {
+            gTotalBlock.fChannelData[0] += fWeight * gTempBlock.fChannelData[0];
+            gTotalBlock.fChannelData[1] += fWeight * gTempBlock.fChannelData[1];
+            gTotalBlock.fChannelData[2] += fWeight * gTempBlock.fChannelData[2];
+            gTotalBlock.fChannelData[3] += fWeight * gTempBlock.fChannelData[3];
+        }
+        else
+        {
+            gTotalBlock.iChannelData[0] += fWeight * gTempBlock.iChannelData[0];
+            gTotalBlock.iChannelData[1] += fWeight * gTempBlock.iChannelData[1];
+            gTotalBlock.iChannelData[2] += fWeight * gTempBlock.iChannelData[2];
+            gTotalBlock.iChannelData[3] += fWeight * gTempBlock.iChannelData[3];
+        }
+
+        fSampleCount += fWeight;
+    }
+
+    //
+    // Normalize our gaussian sum back to the valid pixel range
+    //
+
+    // NOTE(review): fSampleCount may be 0 if all taps were out of bounds,
+    // making this divide produce inf/NaN -- confirm callers sample in bounds.
+    FLOAT32 fScaleFactor = 1.0f / fSampleCount;
+
+    // Precision/channel layout is taken from the last converted sample block.
+    gTotalBlock.uiPrecision = gTempBlock.uiPrecision;
+    gTotalBlock.uiChannelCount = gTempBlock.uiChannelCount;
+
+    if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+    {
+        gTotalBlock.fChannelData[0] = fScaleFactor * gTotalBlock.fChannelData[0];
+        gTotalBlock.fChannelData[1] = fScaleFactor * gTotalBlock.fChannelData[1];
+        gTotalBlock.fChannelData[2] = fScaleFactor * gTotalBlock.fChannelData[2];
+        gTotalBlock.fChannelData[3] = fScaleFactor * gTotalBlock.fChannelData[3];
+    }
+    else
+    {
+        gTotalBlock.iChannelData[0] = fScaleFactor * gTotalBlock.iChannelData[0];
+        gTotalBlock.iChannelData[1] = fScaleFactor * gTotalBlock.iChannelData[1];
+        gTotalBlock.iChannelData[2] = fScaleFactor * gTotalBlock.iChannelData[2];
+        gTotalBlock.iChannelData[3] = fScaleFactor * gTotalBlock.iChannelData[3];
+    }
+
+    //
+    // Write our weighted sum to our output
+    //
+
+    vnConvertFromBlock( gTotalBlock, pSrcImage.QueryFormat(), pRawOutput );
+
+    return VN_SUCCESS;
+}
+
+//
+// vnSampleGaussianHorizontal
+//
+// Horizontal twin of vnSampleGaussianVertical: gaussian-weighted average of
+// source pixels in a 1D horizontal window centered at (fX, fY), written to
+// pRawOutput.
+//
+VN_STATUS vnSampleGaussianHorizontal( CONST CVImage & pSrcImage, FLOAT32 fX, FLOAT32 fY, FLOAT32 fRadius, UINT8 * pRawOutput )
+{
+    if ( VN_PARAM_CHECK )
+    {
+        // NOTE(review): exact float compare -- only a literal 0.0f radius is rejected.
+        if ( 0 == fRadius || !pRawOutput || !VN_IS_IMAGE_VALID(pSrcImage) )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    FLOAT32 fSampleCount = 0;
+    VN_PIXEL_BLOCK gTotalBlock = {0};
+    VN_PIXEL_BLOCK gTempBlock = {0};
+    INT32 iRadius = fRadius + 1.0f;    // truncating float -> int conversion
+
+    //
+    // Scan the kernel space adding up the gaussian weights and pixel values
+    //
+
+    FLOAT32 fMaxDistance = fRadius + 1.0f;
+
+    for ( INT32 i = -iRadius; i <= iRadius; i++ )
+    {
+        INT32 iX = (INT32) fX + i;
+        INT32 iY = (INT32) fY;
+
+        // Taps outside the image are skipped entirely (no edge clamping).
+        if ( iX < 0 || iY < 0 ||
+             iX > pSrcImage.QueryWidth() - 1 ||
+             iY > pSrcImage.QueryHeight() - 1 )
+        {
+            continue;
+        }
+
+        UINT8 * pSrcPixel = pSrcImage.QueryData() + pSrcImage.BlockOffset( iX, iY );
+
+        FLOAT32 fXDelta = (FLOAT32) fX - iX;
+        FLOAT32 fDistance = fabs( fXDelta );
+        FLOAT32 fWeight = vnGaussianWeight( (FLOAT32) fDistance, fMaxDistance );
+
+        vnConvertToBlock( pSrcPixel, pSrcImage.QueryFormat(), &gTempBlock );
+
+        if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+        {
+            gTotalBlock.fChannelData[0] += fWeight * gTempBlock.fChannelData[0];
+            gTotalBlock.fChannelData[1] += fWeight * gTempBlock.fChannelData[1];
+            gTotalBlock.fChannelData[2] += fWeight * gTempBlock.fChannelData[2];
+            gTotalBlock.fChannelData[3] += fWeight * gTempBlock.fChannelData[3];
+        }
+        else
+        {
+            gTotalBlock.iChannelData[0] += fWeight * gTempBlock.iChannelData[0];
+            gTotalBlock.iChannelData[1] += fWeight * gTempBlock.iChannelData[1];
+            gTotalBlock.iChannelData[2] += fWeight * gTempBlock.iChannelData[2];
+            gTotalBlock.iChannelData[3] += fWeight * gTempBlock.iChannelData[3];
+        }
+
+        fSampleCount += fWeight;
+    }
+
+    //
+    // Normalize our gaussian sum back to the valid pixel range
+    //
+
+    // NOTE(review): fSampleCount may be 0 if all taps were out of bounds,
+    // making this divide produce inf/NaN -- confirm callers sample in bounds.
+    FLOAT32 fScaleFactor = 1.0f / fSampleCount;
+
+    // Precision/channel layout is taken from the last converted sample block.
+    gTotalBlock.uiPrecision = gTempBlock.uiPrecision;
+    gTotalBlock.uiChannelCount = gTempBlock.uiChannelCount;
+
+    if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+    {
+        gTotalBlock.fChannelData[0] = fScaleFactor * gTotalBlock.fChannelData[0];
+        gTotalBlock.fChannelData[1] = fScaleFactor * gTotalBlock.fChannelData[1];
+        gTotalBlock.fChannelData[2] = fScaleFactor * gTotalBlock.fChannelData[2];
+        gTotalBlock.fChannelData[3] = fScaleFactor * gTotalBlock.fChannelData[3];
+    }
+    else
+    {
+        gTotalBlock.iChannelData[0] = fScaleFactor * gTotalBlock.iChannelData[0];
+        gTotalBlock.iChannelData[1] = fScaleFactor * gTotalBlock.iChannelData[1];
+        gTotalBlock.iChannelData[2] = fScaleFactor * gTotalBlock.iChannelData[2];
+        gTotalBlock.iChannelData[3] = fScaleFactor * gTotalBlock.iChannelData[3];
+    }
+
+    //
+    // Write our weighted sum to our output
+    //
+
+    vnConvertFromBlock( gTotalBlock, pSrcImage.QueryFormat(), pRawOutput );
+
+    return VN_SUCCESS;
+}
+
+//
+// vnGaussianKernel (separable variant)
+//
+// Dispatches a single-axis gaussian sample at (fX, fY):
+// bDirection == FALSE -> horizontal pass, TRUE -> vertical pass.
+//
+VN_STATUS vnGaussianKernel( CONST CVImage & pSrcImage, FLOAT32 fX, FLOAT32 fY, BOOL bDirection, FLOAT32 fRadius, UINT8 * pRawOutput )
+{
+    if ( VN_PARAM_CHECK )
+    {
+        if ( 0 == fRadius || !pRawOutput || !VN_IS_IMAGE_VALID(pSrcImage) )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    //
+    // Compute the horizontal or vertical gaussian sample at the requested pixel coordinate.
+    //
+
+    switch ( bDirection )
+    {
+        case FALSE: return vnSampleGaussianHorizontal( pSrcImage, fX, fY, fRadius, pRawOutput );
+        case TRUE: return vnSampleGaussianVertical( pSrcImage, fX, fY, fRadius, pRawOutput );
+    }
+
+    // Unreachable for well-formed BOOL values.
+    return VN_ERROR_NOTIMPL;
+}
\ No newline at end of file
diff --git a/src/libs/image-resampler/Kernels/vnImageGaussian.h b/src/libs/image-resampler/Kernels/vnImageGaussian.h
new file mode 100644
index 000000000..0b5d1cbbe
--- /dev/null
+++ b/src/libs/image-resampler/Kernels/vnImageGaussian.h
@@ -0,0 +1,120 @@
+
+//
+// Copyright (c) 2002-2009 Joe Bertolami. All Right Reserved.
+//
+// vnImageGaussian.h
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Additional Information:
+//
+// For more information, visit http://www.bertolami.com.
+//
+
+#ifndef __VN_IMAGE_GAUSSIAN_H__
+#define __VN_IMAGE_GAUSSIAN_H__
+
+#include "../Base/vnBase.h"
+#include "../Base/vnMath.h"
+#include "../Base/vnImage.h"
+
+//
+// Our simple gaussian distribution function with mean of zero and std-dev (d):
+//
+// 1.0 -( x^2 / ( 2 * d * d ) )
+// f(x) = ----------------- * e
+// 1/2
+// d * ( 2 * Pi )
+//
+
+//
+// Evaluates the gaussian falloff for a tap at fDistance, normalized by
+// fRadius so that fRange spans [0, 1] across the kernel support.
+//
+inline FLOAT32 vnGaussianWeight( FLOAT32 fDistance, FLOAT32 fRadius )
+{
+    FLOAT32 fRange = fDistance / ( fRadius );
+
+    //
+    // Gaussian function with mean = 0 and variance = 0.1.
+    //
+
+    // NOTE(review): function-local statics are initialized on first call;
+    // sqrt/exp here take the double overloads and truncate back to FLOAT32.
+    static CONST FLOAT32 g_variance = 0.1f;
+    static CONST FLOAT32 g_stddev = sqrt( g_variance );
+    static CONST FLOAT32 g_coeff = 1.0f / ( g_stddev * sqrt( 2.0 * VN_PI ) );
+
+    return g_coeff * exp( -1.0f * ( fRange * fRange ) / ( 2.0 * g_variance ) );
+}
+
+//
+// Gaussian Kernel
+//
+// GaussianKernel performs a simple combined 2D gaussian sampling of a source image.
+// Gaussian filters are separable, but this function performs a non-separated kernel
+// that facilitates non-uniform sampling filters.
+//
+// Parameters:
+//
+// pSrcImage: The read-only source image to filter.
+//
+//   fX, fY:     Specifies the coordinates to sample in the source image.
+//
+// fRadius: The radius to sample with the gaussian kernel.
+//
+// pRawOutput: a pointer to a pixel buffer. Upon return, this buffer will contain the
+// sampled result of the operation.
+//
+// Supported Image Formats:
+//
+// All image formats are supported.
+//
+
+VN_STATUS vnGaussianKernel( CONST CVImage & pSrcImage, FLOAT32 fX, FLOAT32 fY, FLOAT32 fRadius, UINT8 * pRawOutput );
+
+//
+// Gaussian Kernel
+//
+// GaussianKernel performs a simple horizontal or vertical gaussian sampling of a
+// source image.
+//
+// (!) Note: gaussian filters are separable. This interface may be used to improve
+// operator performance by sampling the horizontal and vertical directions
+// separately.
+//
+// Parameters:
+//
+// pSrcImage: The read-only source image to filter.
+//
+//   fX, fY:     Specifies the coordinates to sample in the source image.
+//
+// bDirection: Specifies the direction to sample. FALSE indicates horizontal sampling,
+// while TRUE indicates vertical.
+//
+// fRadius: The radius to sample with the gaussian kernel.
+//
+// pRawOutput: a pointer to a pixel buffer. Upon return, this buffer will contain the
+// sampled result of the operation.
+//
+// Supported Image Formats:
+//
+// All image formats are supported.
+//
+
+VN_STATUS vnGaussianKernel( CONST CVImage & pSrcImage, FLOAT32 fX, FLOAT32 fY, BOOL bDirection, FLOAT32 fRadius, UINT8 * pRawOutput );
+
+#endif // __VN_IMAGE_GAUSSIAN_H__
\ No newline at end of file
diff --git a/src/libs/image-resampler/Kernels/vnImageLanczos.cpp b/src/libs/image-resampler/Kernels/vnImageLanczos.cpp
new file mode 100644
index 000000000..b98cfc9e1
--- /dev/null
+++ b/src/libs/image-resampler/Kernels/vnImageLanczos.cpp
@@ -0,0 +1,303 @@
+
+#include "vnImageLanczos.h"
+
+#include "../Utilities/vnImageBlock.h"
+
+//
+// vnLanczosKernel (2D variant)
+//
+// Computes a 2D Lanczos-weighted average of source pixels around (fX, fY),
+// with the 2D weight formed as the separable product of the per-axis Lanczos
+// weights. fCoeffN is the Lanczos support parameter (a). Result is converted
+// and written to pRawOutput.
+//
+VN_STATUS vnLanczosKernel( CONST CVImage & pSrcImage, FLOAT32 fCoeffN, FLOAT32 fX, FLOAT32 fY, UINT8 * pRawOutput )
+{
+    if ( VN_PARAM_CHECK )
+    {
+        if ( !pRawOutput || !VN_IS_IMAGE_VALID(pSrcImage) )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    FLOAT32 fSampleCount = 0;
+    VN_PIXEL_BLOCK gTotalBlock = {0};
+    VN_PIXEL_BLOCK gTempBlock = {0};
+
+    //
+    // Scan the kernel space adding up the Lanczos weights and pixel values
+    //
+
+    INT32 iRadius = fCoeffN;    // truncating float -> int conversion
+
+    for ( INT32 j = -iRadius + 1; j <= iRadius; j++ )
+    for ( INT32 i = -iRadius + 1; i <= iRadius; i++ )
+    {
+        INT32 iX = (INT32) fX + i;
+        INT32 iY = (INT32) fY + j;
+
+        // Taps outside the image are skipped entirely (no edge clamping).
+        if ( iX < 0 || iY < 0 ||
+             iX > pSrcImage.QueryWidth() - 1 ||
+             iY > pSrcImage.QueryHeight() - 1 )
+        {
+            continue;
+        }
+
+        UINT8 * pSrcPixel = pSrcImage.QueryData() + pSrcImage.BlockOffset( iX, iY );
+
+        FLOAT32 fXDelta = (FLOAT32) fX - iX;
+        FLOAT32 fYDelta = (FLOAT32) fY - iY;
+        // NOTE(review): fDistance is computed but never used -- the 2D weight
+        // below is the separable product of the per-axis weights.
+        FLOAT32 fDistance = sqrtf( fXDelta * fXDelta + fYDelta * fYDelta );
+        FLOAT32 fWeight = vnLanczosWeight( fCoeffN, fabs( fXDelta ) ) *
+                          vnLanczosWeight( fCoeffN, fabs( fYDelta ) );
+
+        vnConvertToBlock( pSrcPixel, pSrcImage.QueryFormat(), &gTempBlock );
+
+        if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+        {
+            gTotalBlock.fChannelData[0] += fWeight * gTempBlock.fChannelData[0];
+            gTotalBlock.fChannelData[1] += fWeight * gTempBlock.fChannelData[1];
+            gTotalBlock.fChannelData[2] += fWeight * gTempBlock.fChannelData[2];
+            gTotalBlock.fChannelData[3] += fWeight * gTempBlock.fChannelData[3];
+        }
+        else
+        {
+            gTotalBlock.iChannelData[0] += fWeight * gTempBlock.iChannelData[0];
+            gTotalBlock.iChannelData[1] += fWeight * gTempBlock.iChannelData[1];
+            gTotalBlock.iChannelData[2] += fWeight * gTempBlock.iChannelData[2];
+            gTotalBlock.iChannelData[3] += fWeight * gTempBlock.iChannelData[3];
+        }
+
+        fSampleCount += fWeight;
+    }
+
+    //
+    // Normalize our Lanczos sum back to the valid pixel range
+    //
+
+    // NOTE(review): fSampleCount may be 0 (or near 0, since Lanczos weights can
+    // be negative) making this divide produce inf/NaN -- confirm in practice.
+    FLOAT32 fScaleFactor = 1.0 / fSampleCount;
+
+    // Precision/channel layout is taken from the last converted sample block.
+    gTotalBlock.uiPrecision = gTempBlock.uiPrecision;
+    gTotalBlock.uiChannelCount = gTempBlock.uiChannelCount;
+
+    if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+    {
+        gTotalBlock.fChannelData[0] = fScaleFactor * gTotalBlock.fChannelData[0];
+        gTotalBlock.fChannelData[1] = fScaleFactor * gTotalBlock.fChannelData[1];
+        gTotalBlock.fChannelData[2] = fScaleFactor * gTotalBlock.fChannelData[2];
+        gTotalBlock.fChannelData[3] = fScaleFactor * gTotalBlock.fChannelData[3];
+    }
+    else
+    {
+        gTotalBlock.iChannelData[0] = ( (FLOAT32) fScaleFactor * gTotalBlock.iChannelData[0] );
+        gTotalBlock.iChannelData[1] = ( (FLOAT32) fScaleFactor * gTotalBlock.iChannelData[1] );
+        gTotalBlock.iChannelData[2] = ( (FLOAT32) fScaleFactor * gTotalBlock.iChannelData[2] );
+        gTotalBlock.iChannelData[3] = ( (FLOAT32) fScaleFactor * gTotalBlock.iChannelData[3] );
+    }
+
+    //
+    // Write our weighted sum to our output
+    //
+
+    vnConvertFromBlock( gTotalBlock, pSrcImage.QueryFormat(), pRawOutput );
+
+    return VN_SUCCESS;
+}
+
+//
+// vnSampleLanczosVertical
+//
+// Computes a Lanczos-weighted average of source pixels in a 1D vertical
+// window centered at (fX, fY); the vertical pass of the separable Lanczos
+// filter. fCoeffN is the Lanczos support parameter (a).
+//
+VN_STATUS vnSampleLanczosVertical( CONST CVImage & pSrcImage, FLOAT32 fCoeffN, FLOAT32 fX, FLOAT32 fY, UINT8 * pRawOutput )
+{
+    if ( VN_PARAM_CHECK )
+    {
+        if ( !pRawOutput || !VN_IS_IMAGE_VALID(pSrcImage) )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    FLOAT32 fSampleCount = 0;
+    VN_PIXEL_BLOCK gTotalBlock = {0};
+    VN_PIXEL_BLOCK gTempBlock = {0};
+
+    //
+    // Scan the kernel space adding up the Lanczos weights and pixel values
+    //
+
+    INT32 iRadius = fCoeffN;    // truncating float -> int conversion
+
+    // NOTE(review): the window here is [-iRadius, iRadius) -- asymmetric, and
+    // different from the 2D variant's (-iRadius, iRadius]; confirm intended.
+    for ( INT32 j = -iRadius; j < iRadius; j++ )
+    {
+        INT32 iX = (INT32) fX;
+        INT32 iY = (INT32) fY + j;
+
+        // Taps outside the image are skipped entirely (no edge clamping).
+        if ( iX < 0 || iY < 0 ||
+             iX > pSrcImage.QueryWidth() - 1 ||
+             iY > pSrcImage.QueryHeight() - 1 )
+        {
+            continue;
+        }
+
+        UINT8 * pSrcPixel = pSrcImage.QueryData() + pSrcImage.BlockOffset( iX, iY );
+
+        FLOAT32 fYDelta = (FLOAT32) fY - iY;
+        FLOAT32 fDistance = fabs( fYDelta );
+        FLOAT32 fWeight = vnLanczosWeight( fCoeffN, (FLOAT32) fDistance );
+
+        vnConvertToBlock( pSrcPixel, pSrcImage.QueryFormat(), &gTempBlock );
+
+        if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+        {
+            gTotalBlock.fChannelData[0] += fWeight * gTempBlock.fChannelData[0];
+            gTotalBlock.fChannelData[1] += fWeight * gTempBlock.fChannelData[1];
+            gTotalBlock.fChannelData[2] += fWeight * gTempBlock.fChannelData[2];
+            gTotalBlock.fChannelData[3] += fWeight * gTempBlock.fChannelData[3];
+        }
+        else
+        {
+            gTotalBlock.iChannelData[0] += fWeight * gTempBlock.iChannelData[0];
+            gTotalBlock.iChannelData[1] += fWeight * gTempBlock.iChannelData[1];
+            gTotalBlock.iChannelData[2] += fWeight * gTempBlock.iChannelData[2];
+            gTotalBlock.iChannelData[3] += fWeight * gTempBlock.iChannelData[3];
+        }
+
+        fSampleCount += fWeight;
+    }
+
+    //
+    // Normalize our Lanczos sum back to the valid pixel range
+    //
+
+    // NOTE(review): fSampleCount may be 0 if all taps were out of bounds,
+    // making this divide produce inf/NaN -- confirm callers sample in bounds.
+    FLOAT32 fScaleFactor = 1.0f / fSampleCount;
+
+    // Precision/channel layout is taken from the last converted sample block.
+    gTotalBlock.uiPrecision = gTempBlock.uiPrecision;
+    gTotalBlock.uiChannelCount = gTempBlock.uiChannelCount;
+
+    if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+    {
+        gTotalBlock.fChannelData[0] = fScaleFactor * gTotalBlock.fChannelData[0];
+        gTotalBlock.fChannelData[1] = fScaleFactor * gTotalBlock.fChannelData[1];
+        gTotalBlock.fChannelData[2] = fScaleFactor * gTotalBlock.fChannelData[2];
+        gTotalBlock.fChannelData[3] = fScaleFactor * gTotalBlock.fChannelData[3];
+    }
+    else
+    {
+        gTotalBlock.iChannelData[0] = fScaleFactor * gTotalBlock.iChannelData[0];
+        gTotalBlock.iChannelData[1] = fScaleFactor * gTotalBlock.iChannelData[1];
+        gTotalBlock.iChannelData[2] = fScaleFactor * gTotalBlock.iChannelData[2];
+        gTotalBlock.iChannelData[3] = fScaleFactor * gTotalBlock.iChannelData[3];
+    }
+
+    //
+    // Write our weighted sum to our output
+    //
+
+    vnConvertFromBlock( gTotalBlock, pSrcImage.QueryFormat(), pRawOutput );
+
+    return VN_SUCCESS;
+}
+
+//
+// vnSampleLanczosHorizontal
+//
+// Horizontal twin of vnSampleLanczosVertical: Lanczos-weighted average of
+// source pixels in a 1D horizontal window centered at (fX, fY), written to
+// pRawOutput. fCoeffN is the Lanczos support parameter (a).
+//
+VN_STATUS vnSampleLanczosHorizontal( CONST CVImage & pSrcImage, FLOAT32 fCoeffN, FLOAT32 fX, FLOAT32 fY, UINT8 * pRawOutput )
+{
+    if ( VN_PARAM_CHECK )
+    {
+        if ( !pRawOutput || !VN_IS_IMAGE_VALID(pSrcImage) )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    FLOAT32 fSampleCount = 0;
+    VN_PIXEL_BLOCK gTotalBlock = {0};
+    VN_PIXEL_BLOCK gTempBlock = {0};
+
+    //
+    // Scan the kernel space adding up the Lanczos weights and pixel values
+    //
+
+    INT32 iRadius = fCoeffN;    // truncating float -> int conversion
+
+    // NOTE(review): window is [-iRadius, iRadius) -- asymmetric, matching the
+    // vertical pass but not the 2D variant; confirm intended.
+    for ( INT32 i = -iRadius; i < iRadius; i++ )
+    {
+        INT32 iX = (INT32) fX + i;
+        INT32 iY = (INT32) fY;
+
+        // Taps outside the image are skipped entirely (no edge clamping).
+        if ( iX < 0 || iY < 0 ||
+             iX > pSrcImage.QueryWidth() - 1 ||
+             iY > pSrcImage.QueryHeight() - 1 )
+        {
+            continue;
+        }
+
+        UINT8 * pSrcPixel = pSrcImage.QueryData() + pSrcImage.BlockOffset( iX, iY );
+
+        FLOAT32 fXDelta = (FLOAT32) fX - iX;
+        FLOAT32 fDistance = fabs( fXDelta );
+        FLOAT32 fWeight = vnLanczosWeight( fCoeffN, (FLOAT32) fDistance );
+
+        vnConvertToBlock( pSrcPixel, pSrcImage.QueryFormat(), &gTempBlock );
+
+        if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+        {
+            gTotalBlock.fChannelData[0] += fWeight * gTempBlock.fChannelData[0];
+            gTotalBlock.fChannelData[1] += fWeight * gTempBlock.fChannelData[1];
+            gTotalBlock.fChannelData[2] += fWeight * gTempBlock.fChannelData[2];
+            gTotalBlock.fChannelData[3] += fWeight * gTempBlock.fChannelData[3];
+        }
+        else
+        {
+            gTotalBlock.iChannelData[0] += fWeight * gTempBlock.iChannelData[0];
+            gTotalBlock.iChannelData[1] += fWeight * gTempBlock.iChannelData[1];
+            gTotalBlock.iChannelData[2] += fWeight * gTempBlock.iChannelData[2];
+            gTotalBlock.iChannelData[3] += fWeight * gTempBlock.iChannelData[3];
+        }
+
+        fSampleCount += fWeight;
+    }
+
+    //
+    // Normalize our Lanczos sum back to the valid pixel range
+    //
+
+    // NOTE(review): fSampleCount may be 0 if all taps were out of bounds,
+    // making this divide produce inf/NaN -- confirm callers sample in bounds.
+    FLOAT32 fScaleFactor = 1.0f / fSampleCount;
+
+    // Precision/channel layout is taken from the last converted sample block.
+    gTotalBlock.uiPrecision = gTempBlock.uiPrecision;
+    gTotalBlock.uiChannelCount = gTempBlock.uiChannelCount;
+
+    if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+    {
+        gTotalBlock.fChannelData[0] = fScaleFactor * gTotalBlock.fChannelData[0];
+        gTotalBlock.fChannelData[1] = fScaleFactor * gTotalBlock.fChannelData[1];
+        gTotalBlock.fChannelData[2] = fScaleFactor * gTotalBlock.fChannelData[2];
+        gTotalBlock.fChannelData[3] = fScaleFactor * gTotalBlock.fChannelData[3];
+    }
+    else
+    {
+        gTotalBlock.iChannelData[0] = fScaleFactor * gTotalBlock.iChannelData[0];
+        gTotalBlock.iChannelData[1] = fScaleFactor * gTotalBlock.iChannelData[1];
+        gTotalBlock.iChannelData[2] = fScaleFactor * gTotalBlock.iChannelData[2];
+        gTotalBlock.iChannelData[3] = fScaleFactor * gTotalBlock.iChannelData[3];
+    }
+
+    //
+    // Write our weighted sum to our output
+    //
+
+    vnConvertFromBlock( gTotalBlock, pSrcImage.QueryFormat(), pRawOutput );
+
+    return VN_SUCCESS;
+}
+
+//
+// vnLanczosKernel (separable variant)
+//
+// Dispatches a single-axis Lanczos sample at (fX, fY):
+// bDirection == FALSE -> horizontal pass, TRUE -> vertical pass.
+//
+VN_STATUS vnLanczosKernel( CONST CVImage & pSrcImage, FLOAT32 fCoeffN, FLOAT32 fX, FLOAT32 fY, BOOL bDirection, UINT8 * pRawOutput )
+{
+    if ( VN_PARAM_CHECK )
+    {
+        if ( !pRawOutput || !VN_IS_IMAGE_VALID(pSrcImage) )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    //
+    // Compute the horizontal or vertical Lanczos sample at the requested pixel coordinate.
+    //
+
+    switch ( bDirection )
+    {
+        case FALSE: return vnSampleLanczosHorizontal( pSrcImage, fCoeffN, fX, fY, pRawOutput );
+        case TRUE: return vnSampleLanczosVertical( pSrcImage, fCoeffN, fX, fY, pRawOutput );
+    }
+
+    // Unreachable for well-formed BOOL values.
+    return VN_ERROR_NOTIMPL;
+}
\ No newline at end of file
diff --git a/src/libs/image-resampler/Kernels/vnImageLanczos.h b/src/libs/image-resampler/Kernels/vnImageLanczos.h
new file mode 100644
index 000000000..dfd9b6d94
--- /dev/null
+++ b/src/libs/image-resampler/Kernels/vnImageLanczos.h
@@ -0,0 +1,128 @@
+
+//
+// Copyright (c) 2002-2009 Joe Bertolami. All Right Reserved.
+//
+// vnImageLanczos.h
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Additional Information:
+//
+// For more information, visit http://www.bertolami.com.
+//
+
+#ifndef __VN_IMAGE_LANCZOS_H__
+#define __VN_IMAGE_LANCZOS_H__
+
+#include "../Base/vnBase.h"
+#include "../Base/vnMath.h"
+#include "../Base/vnImage.h"
+
+//
+// Normalized sinc function
+//
+
+inline FLOAT32 vnSinc( FLOAT32 fX )
+{
+    //
+    // Normalized sinc: sin(pi*x) / (pi*x), with the removable singularity at
+    // x == 0 defined as 1. Uses sinf and float literals so the FLOAT32
+    // computation does not silently round trip through double precision.
+    //
+
+    if ( 0.0f == fX ) return 1.0f;
+
+    return sinf( VN_PI * fX ) / ( VN_PI * fX );
+}
+
+//
+// Lanczos weighting function
+//
+
+inline FLOAT32 vnLanczosWeight( FLOAT32 fN, FLOAT32 fDistance )
+{
+    //
+    // Lanczos window: sinc(d) * sinc(d / N) inside the filter support
+    // [0, N], and zero everywhere outside of it.
+    //
+
+    if ( fDistance <= fN )
+    {
+        return vnSinc( fDistance ) * vnSinc( fDistance / fN );
+    }
+
+    return 0.0f;
+}
+
+//
+// Lanczos Kernel
+//
+// LanczosKernel performs a simple combined 2D lanczos sampling of a source image.
+// Lanczos filters are non-separable.
+//
+// Parameters:
+//
+// pSrcImage: The read-only source image to filter.
+//
+// fCoeffN: The theta lanczos coefficient
+//
+// fX, fY: Specifies the coordinates to sample in the source image.
+//
+// pRawOutput: A pointer to a pixel buffer. Upon return, this buffer will contain the
+// sampled result of the operation.
+//
+// Supported Image Formats:
+//
+// All image formats are supported.
+//
+
+VN_STATUS vnLanczosKernel( CONST CVImage & pSrcImage,
+ FLOAT32 fCoeffN,
+ FLOAT32 fX,
+ FLOAT32 fY,
+ UINT8 * pRawOutput );
+
+//
+// Lanczos Kernel
+//
+// LanczosKernel performs a simple horizontal or vertical lanczos sampling of a
+// source image.
+//
+// (!) Note: lanczos filters are non-separable. This interface should not be used
+// to perform 2D image filtering.
+//
+// Parameters:
+//
+// pSrcImage: The read-only source image to filter.
+//
+// fCoeffN: The theta lanczos coefficient
+//
+// fX, fY: Specifies the coordinates to sample in the source image.
+//
+// bDirection: Specifies the direction to sample. FALSE indicates horizontal sampling,
+// while TRUE indicates vertical.
+//
+// pRawOutput: a pointer to a pixel buffer. Upon return, this buffer will contain the
+// sampled result of the operation.
+//
+// Supported Image Formats:
+//
+// All image formats are supported.
+//
+
+VN_STATUS vnLanczosKernel( CONST CVImage & pSrcImage,
+ FLOAT32 fCoeffN,
+ FLOAT32 fX,
+ FLOAT32 fY,
+ BOOL bDirection,
+ UINT8 * pRawOutput );
+
+#endif // __VN_IMAGE_LANCZOS_H__
\ No newline at end of file
diff --git a/src/libs/image-resampler/Kernels/vnImageNearest.cpp b/src/libs/image-resampler/Kernels/vnImageNearest.cpp
new file mode 100644
index 000000000..1ce47fd1f
--- /dev/null
+++ b/src/libs/image-resampler/Kernels/vnImageNearest.cpp
@@ -0,0 +1,40 @@
+
+#include "vnImageNearest.h"
+
+#include "../Utilities/vnImageBlock.h"
+
+VN_STATUS vnNearestKernel( CONST CVImage & pSrcImage, FLOAT32 fX, FLOAT32 fY, UINT8 * pRawOutput )
+{
+    //
+    // Nearest neighbor sampler: rounds the (pixel centered) coordinate to the
+    // closest integer pixel, clamps it to the image bounds, and copies that
+    // single source pixel into the caller's output buffer.
+    //
+
+    if ( VN_PARAM_CHECK )
+    {
+        if ( !VN_IS_IMAGE_VALID(pSrcImage) || !pRawOutput )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    //
+    // Floating point pixel coordinates are pixel-center based: (0,0) is the
+    // center of the first pixel, so adding 0.5 before truncation rounds to
+    // the nearest sample.
+    //
+
+    INT32 iNearestX = (INT32) ( fX + 0.5f );
+    INT32 iNearestY = (INT32) ( fY + 0.5f );
+
+    //
+    // Clamp to the valid pixel range.
+    //
+
+    if ( iNearestX < 0 ) iNearestX = 0;
+    else if ( iNearestX > pSrcImage.QueryWidth() - 1 ) iNearestX = pSrcImage.QueryWidth() - 1;
+
+    if ( iNearestY < 0 ) iNearestY = 0;
+    else if ( iNearestY > pSrcImage.QueryHeight() - 1 ) iNearestY = pSrcImage.QueryHeight() - 1;
+
+    //
+    // Copy the selected pixel (QueryBitsPerPixel() >> 3 bytes) to the output.
+    //
+
+    UINT8 * pSamplePixel = pSrcImage.QueryData() + pSrcImage.BlockOffset( iNearestX, iNearestY );
+
+    memcpy( pRawOutput, pSamplePixel, pSrcImage.QueryBitsPerPixel() >> 3 );
+
+    return VN_SUCCESS;
+}
\ No newline at end of file
diff --git a/src/libs/image-resampler/Kernels/vnImageNearest.h b/src/libs/image-resampler/Kernels/vnImageNearest.h
new file mode 100644
index 000000000..5094dfcbf
--- /dev/null
+++ b/src/libs/image-resampler/Kernels/vnImageNearest.h
@@ -0,0 +1,62 @@
+
+//
+// Copyright (c) 2002-2009 Joe Bertolami. All Right Reserved.
+//
+// vnImageNearest.h
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Additional Information:
+//
+// For more information, visit http://www.bertolami.com.
+//
+
+#ifndef __VN_IMAGE_NEAREST_H__
+#define __VN_IMAGE_NEAREST_H__
+
+#include "../Base/vnBase.h"
+#include "../Base/vnMath.h"
+#include "../Base/vnImage.h"
+
+//
+// Nearest Kernel
+//
+// This kernel selects a single pixel out of the source image -- it does not
+// perform any computation involving surrounding pixels.
+//
+// Parameters:
+//
+// pSrcImage: The read-only source image to filter.
+//
+// uiX, uiY: Specifies the coordinates to sample in the source image.
+//
+// pRawOutput: a pointer to a pixel buffer. Upon return, this buffer will contain the
+// sampled result of the operation.
+//
+// Supported Image Formats:
+//
+// All image formats are supported.
+//
+
+VN_STATUS vnNearestKernel( CONST CVImage & pSrcImage, FLOAT32 fX, FLOAT32 fY, UINT8 * pRawOutput );
+
+#endif // __VN_IMAGE_NEAREST_H__
\ No newline at end of file
diff --git a/src/libs/image-resampler/Kernels/vnImageSpline.cpp b/src/libs/image-resampler/Kernels/vnImageSpline.cpp
new file mode 100644
index 000000000..92e64d7b0
--- /dev/null
+++ b/src/libs/image-resampler/Kernels/vnImageSpline.cpp
@@ -0,0 +1,296 @@
+
+#include "vnImageSpline.h"
+
+#include "../Utilities/vnImageBlock.h"
+
+VN_STATUS vnSplineKernel( CONST CVImage & pSrcImage, FLOAT32 fX, FLOAT32 fY, UINT8 * pRawOutput )
+{
+    //
+    // Combined 2D bicubic spline sampler. Accumulates a weighted sum over the
+    // 4x4 tap neighborhood around (fX, fY), normalizes it by the total weight,
+    // and packs the result into pRawOutput in the source image's format.
+    //
+
+    if ( VN_PARAM_CHECK )
+    {
+        if ( !pRawOutput || !VN_IS_IMAGE_VALID(pSrcImage) )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    FLOAT32 fSampleCount = 0;
+    VN_PIXEL_BLOCK gTotalBlock = {0};
+    VN_PIXEL_BLOCK gTempBlock = {0};
+
+    //
+    // Scan the kernel space adding up the bicubic weights and pixel values.
+    // The window covers offsets [-1, 2] on each axis around the truncated
+    // base coordinate.
+    //
+
+    for ( INT32 j = -2 + 1; j <= 2; j++ )
+    for ( INT32 i = -2 + 1; i <= 2; i++ )
+    {
+        INT32 iX = (INT32) fX + i;
+        INT32 iY = (INT32) fY + j;
+
+        //
+        // Skip taps that fall outside the image.
+        //
+
+        if ( iX < 0 || iY < 0 ||
+             iX > pSrcImage.QueryWidth() - 1 ||
+             iY > pSrcImage.QueryHeight() - 1 )
+        {
+            continue;
+        }
+
+        UINT8 * pSrcPixel = pSrcImage.QueryData() + pSrcImage.BlockOffset( iX, iY );
+
+        //
+        // Weight each tap by its euclidean distance from the sample point.
+        //
+
+        FLOAT32 fXDelta = (FLOAT32) fX - iX;
+        FLOAT32 fYDelta = (FLOAT32) fY - iY;
+        FLOAT32 fDistance = sqrtf( fXDelta * fXDelta + fYDelta * fYDelta );
+        FLOAT32 fWeight = vnSplineWeight( (FLOAT32) fDistance );
+
+        vnConvertToBlock( pSrcPixel, pSrcImage.QueryFormat(), &gTempBlock );
+
+        if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+        {
+            gTotalBlock.fChannelData[0] += fWeight * gTempBlock.fChannelData[0];
+            gTotalBlock.fChannelData[1] += fWeight * gTempBlock.fChannelData[1];
+            gTotalBlock.fChannelData[2] += fWeight * gTempBlock.fChannelData[2];
+            gTotalBlock.fChannelData[3] += fWeight * gTempBlock.fChannelData[3];
+        }
+        else
+        {
+            gTotalBlock.iChannelData[0] += fWeight * gTempBlock.iChannelData[0];
+            gTotalBlock.iChannelData[1] += fWeight * gTempBlock.iChannelData[1];
+            gTotalBlock.iChannelData[2] += fWeight * gTempBlock.iChannelData[2];
+            gTotalBlock.iChannelData[3] += fWeight * gTempBlock.iChannelData[3];
+        }
+
+        fSampleCount += fWeight;
+    }
+
+    gTotalBlock.uiPrecision = gTempBlock.uiPrecision;
+    gTotalBlock.uiChannelCount = gTempBlock.uiChannelCount;
+
+    //
+    // Normalize our bicubic sum back to the valid pixel range.
+    //
+    // (Fix) If every tap fell outside the image or carried zero weight,
+    // fSampleCount is 0 and the reciprocal would be Inf, producing NaN
+    // channel values. In that case the accumulators are still zero, so we
+    // skip the scale and emit the zeroed block instead.
+    //
+
+    if ( 0.0f != fSampleCount )
+    {
+        FLOAT32 fScaleFactor = 1.0f / fSampleCount;
+
+        if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+        {
+            gTotalBlock.fChannelData[0] = fScaleFactor * gTotalBlock.fChannelData[0];
+            gTotalBlock.fChannelData[1] = fScaleFactor * gTotalBlock.fChannelData[1];
+            gTotalBlock.fChannelData[2] = fScaleFactor * gTotalBlock.fChannelData[2];
+            gTotalBlock.fChannelData[3] = fScaleFactor * gTotalBlock.fChannelData[3];
+        }
+        else
+        {
+            gTotalBlock.iChannelData[0] = fScaleFactor * gTotalBlock.iChannelData[0];
+            gTotalBlock.iChannelData[1] = fScaleFactor * gTotalBlock.iChannelData[1];
+            gTotalBlock.iChannelData[2] = fScaleFactor * gTotalBlock.iChannelData[2];
+            gTotalBlock.iChannelData[3] = fScaleFactor * gTotalBlock.iChannelData[3];
+        }
+    }
+
+    //
+    // Write our weighted sum to our output.
+    //
+
+    vnConvertFromBlock( gTotalBlock, pSrcImage.QueryFormat(), pRawOutput );
+
+    return VN_SUCCESS;
+}
+
+VN_STATUS vnSampleSplineVertical( CONST CVImage & pSrcImage, FLOAT32 fX, FLOAT32 fY, UINT8 * pRawOutput )
+{
+    //
+    // Vertical 1D bicubic spline sampler: accumulates a weighted column of
+    // taps around (fX, fY), normalizes by the total weight, and packs the
+    // result into pRawOutput in the source image's format.
+    //
+
+    if ( VN_PARAM_CHECK )
+    {
+        if ( !pRawOutput || !VN_IS_IMAGE_VALID(pSrcImage) )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    FLOAT32 fSampleCount = 0;
+    VN_PIXEL_BLOCK gTotalBlock = {0};
+    VN_PIXEL_BLOCK gTempBlock = {0};
+
+    //
+    // Scan the kernel space adding up the bicubic weights and pixel values.
+    //
+    // NOTE(review): this tap window is [-2, 1], while the combined 2D kernel
+    // uses [-1, 2]. The j == -2 tap always lies at distance >= 2 (zero
+    // weight) and the contributing j == +2 tap is never visited — confirm
+    // whether this half-pixel asymmetry is intentional.
+    //
+
+    for ( INT32 j = -2; j < 2; j++ )
+    {
+        INT32 iX = (INT32) fX;
+        INT32 iY = (INT32) fY + j;
+
+        // Skip taps that fall outside the image bounds.
+        if ( iX < 0 || iY < 0 ||
+             iX > pSrcImage.QueryWidth() - 1 ||
+             iY > pSrcImage.QueryHeight() - 1 )
+        {
+            continue;
+        }
+
+        UINT8 * pSrcPixel = pSrcImage.QueryData() + pSrcImage.BlockOffset( iX, iY );
+
+        // The weight depends only on the vertical distance to the tap.
+        FLOAT32 fYDelta = (FLOAT32) fY - iY;
+        FLOAT32 fDistance = fabs( fYDelta );
+        FLOAT32 fWeight = vnSplineWeight( (FLOAT32) fDistance );
+
+        // Unpack the tap into a precision-tagged block so both float and
+        // integer channel layouts can be accumulated uniformly.
+        vnConvertToBlock( pSrcPixel, pSrcImage.QueryFormat(), &gTempBlock );
+
+        if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+        {
+            gTotalBlock.fChannelData[0] += fWeight * gTempBlock.fChannelData[0];
+            gTotalBlock.fChannelData[1] += fWeight * gTempBlock.fChannelData[1];
+            gTotalBlock.fChannelData[2] += fWeight * gTempBlock.fChannelData[2];
+            gTotalBlock.fChannelData[3] += fWeight * gTempBlock.fChannelData[3];
+        }
+        else
+        {
+            gTotalBlock.iChannelData[0] += fWeight * gTempBlock.iChannelData[0];
+            gTotalBlock.iChannelData[1] += fWeight * gTempBlock.iChannelData[1];
+            gTotalBlock.iChannelData[2] += fWeight * gTempBlock.iChannelData[2];
+            gTotalBlock.iChannelData[3] += fWeight * gTempBlock.iChannelData[3];
+        }
+
+        fSampleCount += fWeight;
+    }
+
+    //
+    // Normalize our bicubic sum back to the valid pixel range.
+    //
+    // NOTE(review): if every tap was skipped or carried zero weight,
+    // fSampleCount is 0 and this reciprocal is Inf — confirm callers always
+    // sample within the image.
+    //
+
+    FLOAT32 fScaleFactor = 1.0f / fSampleCount;
+
+    // Propagate the precision/channel tags from the last unpacked tap.
+    gTotalBlock.uiPrecision = gTempBlock.uiPrecision;
+    gTotalBlock.uiChannelCount = gTempBlock.uiChannelCount;
+
+    if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+    {
+        gTotalBlock.fChannelData[0] = fScaleFactor * gTotalBlock.fChannelData[0];
+        gTotalBlock.fChannelData[1] = fScaleFactor * gTotalBlock.fChannelData[1];
+        gTotalBlock.fChannelData[2] = fScaleFactor * gTotalBlock.fChannelData[2];
+        gTotalBlock.fChannelData[3] = fScaleFactor * gTotalBlock.fChannelData[3];
+    }
+    else
+    {
+        gTotalBlock.iChannelData[0] = fScaleFactor * gTotalBlock.iChannelData[0];
+        gTotalBlock.iChannelData[1] = fScaleFactor * gTotalBlock.iChannelData[1];
+        gTotalBlock.iChannelData[2] = fScaleFactor * gTotalBlock.iChannelData[2];
+        gTotalBlock.iChannelData[3] = fScaleFactor * gTotalBlock.iChannelData[3];
+    }
+
+    //
+    // Write our weighted sum to our output.
+    //
+
+    vnConvertFromBlock( gTotalBlock, pSrcImage.QueryFormat(), pRawOutput );
+
+    return VN_SUCCESS;
+}
+
+VN_STATUS vnSampleSplineHorizontal( CONST CVImage & pSrcImage, FLOAT32 fX, FLOAT32 fY, UINT8 * pRawOutput )
+{
+    //
+    // Horizontal 1D bicubic spline sampler: accumulates a weighted row of
+    // taps around (fX, fY), normalizes by the total weight, and packs the
+    // result into pRawOutput in the source image's format.
+    //
+
+    if ( VN_PARAM_CHECK )
+    {
+        if ( !pRawOutput || !VN_IS_IMAGE_VALID(pSrcImage) )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    FLOAT32 fSampleCount = 0;
+    VN_PIXEL_BLOCK gTotalBlock = {0};
+    VN_PIXEL_BLOCK gTempBlock = {0};
+
+    //
+    // Scan the kernel space adding up the bicubic weights and pixel values.
+    //
+    // NOTE(review): this tap window is [-2, 1], while the combined 2D kernel
+    // uses [-1, 2]. The i == -2 tap always lies at distance >= 2 (zero
+    // weight) and the contributing i == +2 tap is never visited — confirm
+    // whether this half-pixel asymmetry is intentional.
+    //
+
+    for ( INT32 i = -2; i < 2; i++ )
+    {
+        INT32 iX = (INT32) fX + i;
+        INT32 iY = (INT32) fY;
+
+        // Skip taps that fall outside the image bounds.
+        if ( iX < 0 || iY < 0 ||
+             iX > pSrcImage.QueryWidth() - 1 ||
+             iY > pSrcImage.QueryHeight() - 1 )
+        {
+            continue;
+        }
+
+        UINT8 * pSrcPixel = pSrcImage.QueryData() + pSrcImage.BlockOffset( iX, iY );
+
+        // The weight depends only on the horizontal distance to the tap.
+        FLOAT32 fXDelta = (FLOAT32) fX - iX;
+        FLOAT32 fDistance = fabs( fXDelta );
+        FLOAT32 fWeight = vnSplineWeight( (FLOAT32) fDistance );
+
+        // Unpack the tap into a precision-tagged block so both float and
+        // integer channel layouts can be accumulated uniformly.
+        vnConvertToBlock( pSrcPixel, pSrcImage.QueryFormat(), &gTempBlock );
+
+        if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+        {
+            gTotalBlock.fChannelData[0] += fWeight * gTempBlock.fChannelData[0];
+            gTotalBlock.fChannelData[1] += fWeight * gTempBlock.fChannelData[1];
+            gTotalBlock.fChannelData[2] += fWeight * gTempBlock.fChannelData[2];
+            gTotalBlock.fChannelData[3] += fWeight * gTempBlock.fChannelData[3];
+        }
+        else
+        {
+            gTotalBlock.iChannelData[0] += fWeight * gTempBlock.iChannelData[0];
+            gTotalBlock.iChannelData[1] += fWeight * gTempBlock.iChannelData[1];
+            gTotalBlock.iChannelData[2] += fWeight * gTempBlock.iChannelData[2];
+            gTotalBlock.iChannelData[3] += fWeight * gTempBlock.iChannelData[3];
+        }
+
+        fSampleCount += fWeight;
+    }
+
+    //
+    // Normalize our bicubic sum back to the valid pixel range.
+    //
+    // NOTE(review): if every tap was skipped or carried zero weight,
+    // fSampleCount is 0 and this reciprocal is Inf — confirm callers always
+    // sample within the image.
+    //
+
+    FLOAT32 fScaleFactor = 1.0f / fSampleCount;
+
+    // Propagate the precision/channel tags from the last unpacked tap.
+    gTotalBlock.uiPrecision = gTempBlock.uiPrecision;
+    gTotalBlock.uiChannelCount = gTempBlock.uiChannelCount;
+
+    if ( VN_IMAGE_PRECISION_FLOAT == gTempBlock.uiPrecision )
+    {
+        gTotalBlock.fChannelData[0] = fScaleFactor * gTotalBlock.fChannelData[0];
+        gTotalBlock.fChannelData[1] = fScaleFactor * gTotalBlock.fChannelData[1];
+        gTotalBlock.fChannelData[2] = fScaleFactor * gTotalBlock.fChannelData[2];
+        gTotalBlock.fChannelData[3] = fScaleFactor * gTotalBlock.fChannelData[3];
+    }
+    else
+    {
+        gTotalBlock.iChannelData[0] = fScaleFactor * gTotalBlock.iChannelData[0];
+        gTotalBlock.iChannelData[1] = fScaleFactor * gTotalBlock.iChannelData[1];
+        gTotalBlock.iChannelData[2] = fScaleFactor * gTotalBlock.iChannelData[2];
+        gTotalBlock.iChannelData[3] = fScaleFactor * gTotalBlock.iChannelData[3];
+    }
+
+    //
+    // Write our weighted sum to our output.
+    //
+
+    vnConvertFromBlock( gTotalBlock, pSrcImage.QueryFormat(), pRawOutput );
+
+    return VN_SUCCESS;
+}
+
+VN_STATUS vnSplineKernel( CONST CVImage & pSrcImage, FLOAT32 fX, FLOAT32 fY, BOOL bDirection, UINT8 * pRawOutput )
+{
+    //
+    // Directional (1D) bicubic spline kernel entry point. Validates the
+    // inputs and forwards the request to the horizontal or vertical sampler.
+    //
+
+    if ( VN_PARAM_CHECK )
+    {
+        if ( !pRawOutput || !VN_IS_IMAGE_VALID(pSrcImage) )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    //
+    // FALSE selects horizontal sampling, TRUE selects vertical sampling.
+    //
+
+    if ( FALSE == bDirection )
+    {
+        return vnSampleSplineHorizontal( pSrcImage, fX, fY, pRawOutput );
+    }
+    else if ( TRUE == bDirection )
+    {
+        return vnSampleSplineVertical( pSrcImage, fX, fY, pRawOutput );
+    }
+
+    //
+    // Any other direction value is unsupported.
+    //
+
+    return VN_ERROR_NOTIMPL;
+}
\ No newline at end of file
diff --git a/src/libs/image-resampler/Kernels/vnImageSpline.h b/src/libs/image-resampler/Kernels/vnImageSpline.h
new file mode 100644
index 000000000..483837213
--- /dev/null
+++ b/src/libs/image-resampler/Kernels/vnImageSpline.h
@@ -0,0 +1,136 @@
+
+//
+// Copyright (c) 2002-2009 Joe Bertolami. All Right Reserved.
+//
+// vnImageSpline.h
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Additional Information:
+//
+// For more information, visit http://www.bertolami.com.
+//
+
+#ifndef __VN_IMAGE_BICUBIC_SPLINE_H__
+#define __VN_IMAGE_BICUBIC_SPLINE_H__
+
+#include "../Base/vnBase.h"
+#include "../Base/vnMath.h"
+#include "../Base/vnImage.h"
+
+//
+// Spline weighting function
+//
+
+inline FLOAT32 vnSplineWeight( FLOAT32 fDistance )
+{
+    //
+    // Cubic convolution weight (Catmull-Rom, a = -0.5) with a support radius
+    // of 2.0 pixels:
+    //
+    //   w(r) =  1.5r^3 - 2.5r^2 + 1        for 0 <= r < 1
+    //   w(r) = -0.5r^3 + 2.5r^2 - 4r + 2   for 1 <= r < 2
+    //   w(r) =  0                          otherwise
+    //
+
+    FLOAT32 fRange = fDistance;
+    FLOAT32 fResult = 0.0;
+
+    if ( fRange < 1.0 )
+    {
+        FLOAT32 fCubicTerm = ( 1.5f ) * ( fRange * fRange * fRange );
+        FLOAT32 fQuadTerm = ( -2.5f ) * ( fRange * fRange );
+        FLOAT32 fConstTerm = ( 1.0f );
+
+        fResult = ( fCubicTerm + fQuadTerm + fConstTerm );
+    }
+
+    else if ( fRange >= 1.0 && fRange < 2.0 )
+    {
+        //
+        // (Fix) The linear term of the outer lobe must be negative. With the
+        // previous ( +4.0f ) coefficient the weight evaluated to 8 at r == 1
+        // and 16 at r == 2, breaking continuity with the inner lobe (which is
+        // 0 at r == 1) and leaving non-zero weight at the kernel boundary.
+        //
+
+        FLOAT32 fCubicTerm = ( -0.5f ) * ( fRange * fRange * fRange );
+        FLOAT32 fQuadTerm = ( 2.5f ) * ( fRange * fRange );
+        FLOAT32 fLinTerm = ( -4.0f ) * ( fRange );
+        FLOAT32 fConstTerm = ( 2.0f );
+
+        fResult = ( fCubicTerm + fQuadTerm + fLinTerm + fConstTerm );
+    }
+
+    // Clamp any residual negative lobe contribution to zero.
+    if ( fResult < 0 ) fResult = 0.0;
+
+    return fResult;
+}
+
+//
+// Bicubic Spline Kernel
+//
+// SplineKernel performs a simple combined 2D bicubic sampling of a source image.
+// Bicubic filters are separable, but this function performs a non-separated kernel
+// that facilitates non-uniform sampling filters.
+//
+// Parameters:
+//
+// pSrcImage: The read-only source image to filter.
+//
+// fX, fY: Specifies the coordinates to sample in the source image.
+//
+// pRawOutput: A pointer to a pixel buffer. Upon return, this buffer will contain the
+// sampled result of the operation.
+//
+// Supported Image Formats:
+//
+// All image formats are supported.
+//
+
+VN_STATUS vnSplineKernel( CONST CVImage & pSrcImage,
+ FLOAT32 fX,
+ FLOAT32 fY,
+ UINT8 * pRawOutput );
+
+//
+// Bicubic Spline Kernel
+//
+// SplineKernel performs a simple horizontal or vertical bicubic sampling of a
+// source image.
+//
+// (!) Note: bicubic filters are separable. This interface may be used to improve
+// operator performance by sampling the horizontal and vertical directions
+// separately.
+//
+// Parameters:
+//
+// pSrcImage: The read-only source image to filter.
+//
+// fX, fY: Specifies the coordinates to sample in the source image.
+//
+// bDirection: Specifies the direction to sample. FALSE indicates horizontal sampling,
+// while TRUE indicates vertical.
+//
+// pRawOutput: a pointer to a pixel buffer. Upon return, this buffer will contain the
+// sampled result of the operation.
+//
+// Supported Image Formats:
+//
+// All image formats are supported.
+//
+
+VN_STATUS vnSplineKernel( CONST CVImage & pSrcImage,
+ FLOAT32 fX,
+ FLOAT32 fY,
+ BOOL bDirection,
+ UINT8 * pRawOutput );
+
+#endif // __VN_IMAGE_BICUBIC_SPLINE_H__
\ No newline at end of file
diff --git a/src/libs/image-resampler/Operators/vnImageClone.h b/src/libs/image-resampler/Operators/vnImageClone.h
new file mode 100644
index 000000000..a31195fba
--- /dev/null
+++ b/src/libs/image-resampler/Operators/vnImageClone.h
@@ -0,0 +1,83 @@
+
+//
+// Copyright (c) 2002-2009 Joe Bertolami. All Right Reserved.
+//
+// vnImageClone.h
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Additional Information:
+//
+// For more information, visit http://www.bertolami.com.
+//
+
+#ifndef __VN_IMAGE_CLONE_H__
+#define __VN_IMAGE_CLONE_H__
+
+#include "../Base/vnBase.h"
+#include "../Base/vnImage.h"
+#include "../Base/vnImageFormat.h"
+
+//
+// CloneImage Operator
+//
+// CloneImage is a simple shorthand for creating an identical copy of an image.
+//
+// Parameters:
+//
+// pSrcImage: The read-only source image to copy.
+//
+// pDestImage: an uninitialized pointer to an image object. Upon successful
+// return, this object will be fully initialized and contain a
+// clone of pSrcImage.
+//
+// Supported Image Formats:
+//
+// All image formats are supported.
+//
+
+inline VN_STATUS vnCloneImage( CONST CVImage & pSrcImage, CVImage * pDestImage )
+{
+    //
+    // Produces an exact copy of pSrcImage in pDestImage: identical format,
+    // identical dimensions, and a byte-for-byte duplicate of the pixel data.
+    //
+
+    if ( VN_PARAM_CHECK )
+    {
+        //
+        // The source must carry a valid format, and the destination must be
+        // a distinct, non-null image object.
+        //
+
+        if ( VN_IMAGE_FORMAT_NONE == pSrcImage.QueryFormat() || !pDestImage )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+
+        if ( &pSrcImage == pDestImage )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    //
+    // Allocate the destination with identical traits, then copy the pixels.
+    //
+
+    if ( VN_FAILED( vnCreateImage( pSrcImage.QueryFormat(), pSrcImage.QueryWidth(), pSrcImage.QueryHeight(), pDestImage ) ) )
+    {
+        return vnPostError( VN_ERROR_EXECUTION_FAILURE );
+    }
+
+    memcpy( pDestImage->QueryData(), pSrcImage.QueryData(), pSrcImage.SlicePitch() );
+
+    return VN_SUCCESS;
+}
+
+#endif // __VN_IMAGE_CLONE_H__
\ No newline at end of file
diff --git a/src/libs/image-resampler/Operators/vnImageConvert.cpp b/src/libs/image-resampler/Operators/vnImageConvert.cpp
new file mode 100644
index 000000000..103d47b87
--- /dev/null
+++ b/src/libs/image-resampler/Operators/vnImageConvert.cpp
@@ -0,0 +1,95 @@
+
+#include "vnImageConvert.h"
+
+#include "../Base/vnMath.h"
+#include "../Utilities/vnImageBlock.h"
+#include "../Operators/vnImageClone.h"
+
+VN_STATUS vnConvertImage( CONST CVImage & pSrcImage, VN_IMAGE_FORMAT destFormat, CVImage * pDestImage )
+{
+    //
+    // Converts pSrcImage into destFormat, allocating and filling pDestImage.
+    // Fast paths: identical formats are cloned; formats that differ only in
+    // the space bits are memcpy'd. Everything else is converted per pixel.
+    //
+
+    if ( VN_PARAM_CHECK )
+    {
+        if ( !VN_IS_IMAGE_VALID(pSrcImage) || VN_IMAGE_FORMAT_NONE == destFormat || !pDestImage )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+
+        // In-place conversion is not supported.
+        if ( &pSrcImage == pDestImage )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    //
+    // Check if we're performing a senseless conversion
+    //
+
+    if ( pSrcImage.QueryFormat() == destFormat )
+    {
+        return vnCloneImage( pSrcImage, pDestImage );
+    }
+
+    //
+    // Our conversion is valid, so we perform it.
+    //
+
+    if ( VN_FAILED( vnCreateImage( destFormat, pSrcImage.QueryWidth(), pSrcImage.QueryHeight(), pDestImage ) ) )
+    {
+        return vnPostError( VN_ERROR_EXECUTION_FAILURE );
+    }
+
+    //
+    // Check if we're merely converting between color and depth values.
+    //
+    // NOTE(review): masking out VN_IMAGE_SPACE_MASK compares everything but
+    // the space bits; when they match, the channel layout is presumably
+    // bitwise identical so a straight copy suffices — confirm against the
+    // VN_IMAGE_FORMAT bit layout in vnImageFormat.h.
+    //
+
+    if ( ( pSrcImage.QueryFormat() & ( ~VN_IMAGE_SPACE_MASK ) ) ==
+         ( destFormat & ( ~VN_IMAGE_SPACE_MASK ) ) )
+    {
+        memcpy( pDestImage->QueryData(), pSrcImage.QueryData(), pSrcImage.SlicePitch() );
+
+        return VN_SUCCESS;
+    }
+
+    //
+    // Perform an exhaustive conversion
+    //
+
+    for ( UINT32 j = 0; j < pSrcImage.QueryHeight(); j++ )
+    for ( UINT32 i = 0; i < pSrcImage.QueryWidth(); i++ )
+    {
+        UINT8 * pSrcPixel = pSrcImage.QueryData() + pSrcImage.BlockOffset( i, j );
+        UINT8 * pDestPixel = pDestImage->QueryData() + pDestImage->BlockOffset( i, j );
+
+        VN_PIXEL_BLOCK sourceBlock = {0};
+        VN_PIXEL_BLOCK destBlock = {0};
+
+        //
+        // Unpack the source channel values into its native block.
+        //
+
+        if ( VN_FAILED( vnConvertToBlock( pSrcPixel, pSrcImage.QueryFormat(), &sourceBlock ) ) )
+        {
+            return vnPostError( VN_ERROR_EXECUTION_FAILURE );
+        }
+
+        //
+        // Convert our source image block into a properly precise destination block.
+        //
+
+        if ( VN_FAILED( vnConvertBlock( sourceBlock, destFormat, &destBlock ) ) )
+        {
+            return vnPostError( VN_ERROR_EXECUTION_FAILURE );
+        }
+
+        //
+        // Pack the destination block into our destination buffer.
+        //
+
+        if ( VN_FAILED( vnConvertFromBlock( destBlock, destFormat, pDestPixel ) ) )
+        {
+            return vnPostError( VN_ERROR_EXECUTION_FAILURE );
+        }
+    }
+
+    return VN_SUCCESS;
+}
diff --git a/src/libs/image-resampler/Operators/vnImageConvert.h b/src/libs/image-resampler/Operators/vnImageConvert.h
new file mode 100644
index 000000000..e2556e273
--- /dev/null
+++ b/src/libs/image-resampler/Operators/vnImageConvert.h
@@ -0,0 +1,61 @@
+
+//
+// Copyright (c) 2002-2009 Joe Bertolami. All Right Reserved.
+//
+// vnImageConvert.h
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Additional Information:
+//
+// For more information, visit http://www.bertolami.com.
+//
+
+#ifndef __VN_IMAGE_CONVERT_H__
+#define __VN_IMAGE_CONVERT_H__
+
+#include "../Base/vnBase.h"
+#include "../Base/vnImage.h"
+#include "../Base/vnImageFormat.h"
+
+//
+// ConvertImage Operator
+//
+// ConvertImage performs an intelligent conversion of a source image into a
+// destination image. The source image must contain a recognized and valid image
+// format.
+//
+// Parameters:
+//
+// pSrcImage: The read-only source image to convert.
+//
+// pDestImage: a pointer to an image object. Upon successful return, this object
+// will be fully initialized and contain a converted reflection of pSrcImage.
+//
+// Supported Image Formats:
+//
+// All image formats are supported.
+//
+
+VN_STATUS vnConvertImage( CONST CVImage & pSrcImage, VN_IMAGE_FORMAT destFormat, CVImage * pDestImage );
+
+#endif // __VN_IMAGE_CONVERT_H__
diff --git a/src/libs/image-resampler/Operators/vnImageResize.cpp b/src/libs/image-resampler/Operators/vnImageResize.cpp
new file mode 100644
index 000000000..1c0051476
--- /dev/null
+++ b/src/libs/image-resampler/Operators/vnImageResize.cpp
@@ -0,0 +1,234 @@
+
+#include "vnImageResize.h"
+
+#include "../Operators/vnImageClone.h"
+
+//
+// vnResizeImageNonSeparable: resamples pSrcImage into pDestImage in a single
+// 2D pass, evaluating the (non-separable) kernel once per destination pixel.
+//
+// fHRatio / fVRatio map integer destination coordinates into sub-pixel source
+// coordinates. Parameters are assumed to be pre-validated by vnResizeImage;
+// this helper performs no checks of its own.
+//
+// Returns VN_SUCCESS, or posts VN_ERROR_EXECUTION_FAILURE if sampling fails.
+//
+VN_STATUS vnResizeImageNonSeparable( CONST CVImage & pSrcImage, VN_IMAGE_KERNEL_TYPE uiKernel, FLOAT32 fHRatio, FLOAT32 fVRatio, CVImage * pDestImage )
+{
+    // Combined 2D support radius: the diagonal of the per-axis ratios, large
+    // enough to cover both axes (each kernel clamps to its own footprint).
+    FLOAT32 fRadius = sqrtf( (FLOAT32) fHRatio * fHRatio + fVRatio * fVRatio );
+
+    for ( UINT32 j = 0; j < pDestImage->QueryHeight(); j++ )
+    for ( UINT32 i = 0; i < pDestImage->QueryWidth(); i++ )
+    {
+        UINT8 * pOutputData = pDestImage->QueryData() + pDestImage->BlockOffset( i, j );
+
+        //
+        // Determine the sub-pixel location of our *target* (i,j) coordinate, in the space
+        // of our source image.
+        //
+
+        FLOAT32 fX = (FLOAT32) i * fHRatio;
+        FLOAT32 fY = (FLOAT32) j * fVRatio;
+
+        if ( VN_FAILED( vnSampleImage( pSrcImage, uiKernel, VN_IMAGE_KERNEL_2D_COMBINED, fX, fY, fRadius, pOutputData ) ) )
+        {
+            return vnPostError( VN_ERROR_EXECUTION_FAILURE );
+        }
+    }
+
+    return VN_SUCCESS;
+}
+
+//
+// vnResizeImageSeparable: resamples pSrcImage into pDestImage with a
+// separable kernel, as two 1D passes: a horizontal pass into a temporary
+// image of [ dest_width, src_height ], then a vertical pass into the
+// destination. fHRatio / fVRatio map integer destination coordinates into
+// sub-pixel source coordinates and also serve as the per-axis sample radii.
+//
+// Parameters are assumed to be pre-validated by vnResizeImage.
+//
+// Returns VN_SUCCESS, or posts VN_ERROR_EXECUTION_FAILURE on any failure.
+//
+VN_STATUS vnResizeImageSeparable( CONST CVImage & pSrcImage, VN_IMAGE_KERNEL_TYPE uiKernel, FLOAT32 fHRatio, FLOAT32 fVRatio, CVImage * pDestImage )
+{
+    CVImage tempImage;
+
+    if ( VN_FAILED( vnCreateImage( pSrcImage.QueryFormat(), pDestImage->QueryWidth(), pSrcImage.QueryHeight(), &tempImage ) ) )
+    {
+        return vnPostError( VN_ERROR_EXECUTION_FAILURE );
+    }
+
+    //
+    // Perform the horizontal filter sampling.
+    //
+
+    for ( UINT32 j = 0; j < pSrcImage.QueryHeight(); j++ )
+    for ( UINT32 i = 0; i < pDestImage->QueryWidth(); i++ )
+    {
+        UINT8 * pOutputData = tempImage.QueryData() + tempImage.BlockOffset( i, j );
+
+        //
+        // Determine the sub-pixel location of our *target* (i,j) coordinate, in the space
+        // of our source image.
+        //
+
+        FLOAT32 fX = (FLOAT32) i * fHRatio;
+        FLOAT32 fY = (FLOAT32) j;
+
+        if ( VN_FAILED( vnSampleImage( pSrcImage, uiKernel, VN_IMAGE_KERNEL_1D_HORIZONTAL, fX, fY, fHRatio, pOutputData ) ) )
+        {
+            return vnPostError( VN_ERROR_EXECUTION_FAILURE );
+        }
+    }
+
+    //
+    // Perform the vertical filter sampling.
+    //
+
+    for ( UINT32 j = 0; j < pDestImage->QueryHeight(); j++ )
+    for ( UINT32 i = 0; i < pDestImage->QueryWidth(); i++ )
+    {
+        UINT8 * pOutputData = pDestImage->QueryData() + pDestImage->BlockOffset( i, j );
+
+        //
+        // Determine the sub-pixel location of our *target* (i,j) coordinate, in the space
+        // of our temp image.
+        //
+
+        FLOAT32 fX = (FLOAT32) i;
+        FLOAT32 fY = (FLOAT32) j * fVRatio;
+
+        if ( VN_FAILED( vnSampleImage( tempImage, uiKernel, VN_IMAGE_KERNEL_1D_VERTICAL, fX, fY, fVRatio, pOutputData ) ) )
+        {
+            return vnPostError( VN_ERROR_EXECUTION_FAILURE );
+        }
+    }
+
+    return VN_SUCCESS;
+}
+
+//
+// vnResizeImageWithPadding: copies the source into the top-left corner of the
+// (already created) destination without any resampling. If the destination is
+// smaller on an axis the source is cropped; if larger, the extra area is left
+// untouched.
+//
+// NOTE(review): the padded region keeps whatever contents vnCreateImage
+// produced for pDestImage -- confirm that vnCreateImage zero-initializes if
+// deterministic padding is expected.
+//
+VN_STATUS vnResizeImageWithPadding( CONST CVImage & pSrcImage, CVImage * pDestImage )
+{
+    if ( VN_PARAM_CHECK )
+    {
+        if ( !VN_IS_IMAGE_VALID(pSrcImage) || !pDestImage )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    //
+    // Copy each row of our data over, paying special attention to padding.
+    //
+
+    // Row count and byte count are each clamped to the smaller of the two
+    // images, so this both crops and pads correctly.
+    for ( UINT32 j = 0; j < VN_MIN2( pDestImage->QueryHeight(), pSrcImage.QueryHeight() ); j++ )
+    {
+        UINT8 * pbyDest = pDestImage->QueryData() + pDestImage->BlockOffset( 0, j );
+        UINT8 * pbySrc  = pSrcImage.QueryData() + pSrcImage.BlockOffset( 0, j );
+
+        memcpy( pbyDest, pbySrc, VN_MIN2( pDestImage->RowPitch(), pSrcImage.RowPitch() ) );
+    }
+
+    return VN_SUCCESS;
+}
+
+//
+// vnResizeImage: public resize entry point. Validates parameters, applies the
+// VN_IMAGE_RESIZE_* flag adjustments (pow-2 alignment, symmetric dimensions,
+// pad-extend), creates the destination image, computes the dest->src mapping
+// ratios, and dispatches to the separable or non-separable resampling path.
+// See vnImageResize.h for the full parameter contract.
+//
+VN_STATUS vnResizeImage( CONST CVImage & pSrcImage, 
+                         VN_IMAGE_KERNEL_TYPE uiKernel, 
+                         UINT32 uiWidth, 
+                         UINT32 uiHeight, 
+                         VN_IMAGE_RESIZE_PARAMETERS uiFlags, 
+                         CVImage * pDestImage )
+{
+    if ( VN_PARAM_CHECK )
+    {
+        if ( !VN_IS_IMAGE_VALID(pSrcImage) || 0 == uiWidth || 0 == uiHeight || !pDestImage )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    //
+    // Check our parameters and state
+    //
+
+    if ( VN_IMAGE_RESIZE_POW2 & uiFlags )
+    {
+        //
+        // Guarantee that our dimensions are pow-2 aligned
+        //
+
+        uiWidth  = vnAlign2( uiWidth );
+        uiHeight = vnAlign2( uiHeight );
+    }
+
+    if ( VN_IMAGE_RESIZE_SYMMETRIC & uiFlags )
+    {
+        //
+        // Enforce square dimensions
+        //
+        // (?) Should we change this to aspect scale?
+        //
+
+        uiHeight = uiWidth;
+    }
+
+    //
+    // Verify whether resampling is actually necessary
+    //
+
+    if ( uiWidth == pSrcImage.QueryWidth() && uiHeight == pSrcImage.QueryHeight() )
+    {
+        return vnCloneImage( pSrcImage, pDestImage );
+    }
+
+    //
+    // Create our destination image.
+    //
+
+    if ( VN_FAILED( vnCreateImage( pSrcImage.QueryFormat(), uiWidth, uiHeight, pDestImage ) ) )
+    {
+        return vnPostError( VN_ERROR_EXECUTION_FAILURE );
+    }
+
+    //
+    // Verify one final option -- if the caller is requesting a pad extension rather than
+    // a resampling, we do it instead.
+    //
+
+    if ( VN_IMAGE_RESIZE_PAD_EXTEND & uiFlags )
+    {
+        //
+        // Pad our image to keep the original data in-place and un-scaled.
+        //
+
+        return vnResizeImageWithPadding( pSrcImage, pDestImage );
+    }
+
+    //
+    // Prepare to perform our resample. This is perhaps the most important part of our resizer --
+    // the calculation of our image ratios. These ratios are responsible for mapping between our
+    // integer pixel locations of the source image and our float sub-pixel coordinates within the
+    // source image that represent a reflection of our destination pixels.
+    //
+    // Quick visualization:
+    //
+    //   For a source 2x1 image and a destination 4x1 image:
+    //
+    //        +------------+------------+      o: Note that the center of the first and last pixels 
+    //   Src: |      0     |      1     |         in both our src and dst images line up with our
+    //        +------------+------------+         float edges of 0.0 and 1.0.
+    //        |                         |
+    //       0.0                       1.0     o: Our sub-pixel interpolated coordinates will always 
+    //        |                         |         be >= 0 and <= src_width
+    //        +---+---+---+---+
+    //   Dst: | 0 | 1 | 2 | 3 |                o: Thus the src pixel coordinate of our final destination 
+    //        +---+---+---+---+                   pixel will always be src_width - 1.
+    //
+
+    // josephb: fixme -- check this ? 1.0 (it used to be 0.0).
+
+    // Degenerate 1-pixel targets use a ratio of 1 to avoid dividing by zero.
+    FLOAT32 fHorizRatio = ( 1 == uiWidth  ? 1.0f : (FLOAT32) ( pSrcImage.QueryWidth() - 1 )  / ( uiWidth - 1 ) );
+    FLOAT32 fVertRatio  = ( 1 == uiHeight ? 1.0f : (FLOAT32) ( pSrcImage.QueryHeight() - 1 ) / ( uiHeight - 1 ) );
+
+    //
+    // The ratio really just needs to be large enough to cover the potentially important pixels. 
+    // Note that each kernel will clamp to its own (smaller) kernel. The radii simply need to be 
+    // non-zero and large enough to cover the space.
+    //
+
+    //
+    // If our kernel is non-separable for resizing operations, perform it in 2D
+    //
+
+    // NOTE(review): the high bit of the kernel id appears to flag separable
+    // kernels -- confirm against the VN_IMAGE_KERNEL_TYPE definition.
+    if ( !( 0x80000000 & uiKernel ) )
+    {
+        return vnResizeImageNonSeparable( pSrcImage, uiKernel, fHorizRatio, fVertRatio, pDestImage );
+    }
+
+    //
+    // Our resize filter is separable, so we perform it in the horizontal space first, and 
+    // then in the vertical.
+    //
+
+    return vnResizeImageSeparable( pSrcImage, uiKernel, fHorizRatio, fVertRatio, pDestImage );
+}
diff --git a/src/libs/image-resampler/Operators/vnImageResize.h b/src/libs/image-resampler/Operators/vnImageResize.h
new file mode 100644
index 000000000..2a7a1a660
--- /dev/null
+++ b/src/libs/image-resampler/Operators/vnImageResize.h
@@ -0,0 +1,77 @@
+
+//
+// Copyright (c) 2002-2009 Joe Bertolami. All Right Reserved.
+//
+// vnImageResize.h
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Additional Information:
+//
+// For more information, visit http://www.bertolami.com.
+//
+
+#ifndef __VN_IMAGE_RESIZE_H__
+#define __VN_IMAGE_RESIZE_H__
+
+#include "../Base/vnBase.h"
+#include "../Base/vnMath.h"
+#include "../Base/vnImage.h"
+#include "../Utilities/vnImageSampler.h"
+
+#define VN_IMAGE_RESIZE_PARAMETERS UINT16
+#define VN_IMAGE_RESIZE_DEFAULT 0x0000
+#define VN_IMAGE_RESIZE_POW2 0x0001 // align dimensions to pow-2
+#define VN_IMAGE_RESIZE_SYMMETRIC 0x0002 // constrain proportions
+#define VN_IMAGE_RESIZE_PAD_EXTEND 0x0004 // resize canvas but not the content
+
+//
+// ResizeImage Operator
+//
+// ResizeImage resamples the image to the dimensions specified.
+//
+// Parameters:
+//
+// pSrcImage: The read-only source image to resize.
+//
+// uiKernel: The sampling kernel to use when performing the resizing.
+//
+// uiWidth: The destination width to target.
+//
+// uiHeight: The destination height to target.
+//
+// pDestImage: a pointer to an image object. Upon successful return, this object will
+// contain a resized view of the source image.
+//
+// Supported Image Formats:
+//
+// All image formats are supported.
+//
+
+VN_STATUS vnResizeImage( CONST CVImage & pSrcImage,
+ VN_IMAGE_KERNEL_TYPE uiKernel,
+ UINT32 uiWidth,
+ UINT32 uiHeight,
+ VN_IMAGE_RESIZE_PARAMETERS uiFlags,
+ CVImage * pDestImage );
+
+#endif // __VN_IMAGE_RESIZE_H__
diff --git a/src/libs/image-resampler/Operators/vnImageScale.cpp b/src/libs/image-resampler/Operators/vnImageScale.cpp
new file mode 100644
index 000000000..8408f5646
--- /dev/null
+++ b/src/libs/image-resampler/Operators/vnImageScale.cpp
@@ -0,0 +1,210 @@
+
+#include "vnImageScale.h"
+
+#include "../Operators/vnImageClone.h"
+
+//
+// vnBresenhamScaleLine: nearest-neighbor stretch/shrink of a single line of
+// pixels using Bresenham-style integer error accumulation -- no division,
+// modulus, or floating point inside the loop.
+//
+// Parameters:
+//
+//   pSrcBuffer / pDestBuffer: first pixel of the source / destination line.
+//   uiSrcLength / uiDestLength: line lengths, in pixels.
+//   uiSrcStride / uiDestStride: byte step between consecutive pixels of each
+//     line (the pixel pitch when walking a row, the row pitch when walking a
+//     column).
+//   uiPixelPitch: bytes copied per pixel.
+//
+// Returns VN_SUCCESS, or posts VN_ERROR_INVALIDARG on a bad parameter.
+//
+VN_STATUS vnBresenhamScaleLine( UINT8 * pSrcBuffer, UINT32 uiSrcLength, UINT32 uiSrcStride, UINT32 uiPixelPitch, UINT8 * pDestBuffer, UINT32 uiDestLength, UINT32 uiDestStride )
+{
+    if ( VN_PARAM_CHECK )
+    {
+        if ( !pSrcBuffer || 0 == uiSrcLength || !pDestBuffer || 0 == uiDestLength || 0 == uiPixelPitch )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    UINT32 uiErrorTerm = 0;
+
+    //
+    // We avoid the use of division or modulus operators for each iteration, but rely upon 
+    // an initial branch per line.
+    //
+
+    // Upscale: iterate over destination pixels, advancing the source
+    // coordinate whenever the accumulated error crosses the dest length.
+    if ( uiDestLength > uiSrcLength )
+    {
+        UINT32 uiSrcCoord = 0;
+
+        for ( UINT32 uiDestCoord = 0; uiDestCoord < uiDestLength; uiDestCoord++ )
+        {
+            uiErrorTerm += uiSrcLength;
+
+            if ( uiErrorTerm > uiDestLength )
+            {
+                uiErrorTerm -= uiDestLength;
+                uiSrcCoord++;
+            }
+
+            //
+            // Fill out our current pixel
+            //
+
+            UINT8 * pSrcPixel  = pSrcBuffer + uiSrcCoord * uiSrcStride;
+            UINT8 * pDestPixel = pDestBuffer + uiDestCoord * uiDestStride;
+
+            memcpy( pDestPixel, pSrcPixel, uiPixelPitch );
+        }
+    }
+
+    //
+    // The source line is longer than the destination, so we increment over the source.
+    //
+
+    // Downscale (or equal length): iterate over source pixels; a destination
+    // pixel that several source pixels map to ends up with the last of them.
+    else
+    {
+        UINT32 uiDestCoord = 0;
+
+        for ( UINT32 uiSrcCoord = 0; uiSrcCoord < uiSrcLength; uiSrcCoord++ )
+        {
+            uiErrorTerm += uiDestLength;
+
+            if ( uiErrorTerm > uiSrcLength )
+            {
+                uiErrorTerm -= uiSrcLength;
+                uiDestCoord++;
+            }
+
+            //
+            // Fill out our current pixel
+            //
+
+            UINT8 * pSrcPixel  = pSrcBuffer + uiSrcCoord * uiSrcStride;
+            UINT8 * pDestPixel = pDestBuffer + uiDestCoord * uiDestStride;
+
+            memcpy( pDestPixel, pSrcPixel, uiPixelPitch );
+        }
+    }
+
+    return VN_SUCCESS;
+}
+
+//
+// vnBresenhamScaleImage: nearest-neighbor scale of pSrcImage into the
+// pre-created pDestImage, performed as two 1D line passes (horizontal rows
+// into a temp image, then vertical columns into the destination).
+//
+// NOTE(review): pSrcImage validity is assumed to have been checked by the
+// caller (vnScaleImage); only pDestImage is validated here.
+//
+VN_STATUS vnBresenhamScaleImage( CONST CVImage & pSrcImage, CVImage * pDestImage )
+{
+    if ( VN_PARAM_CHECK )
+    {
+        if ( !pDestImage )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    //
+    // Create an image with dimensions of [ dest_width, src_height ] so that we
+    // may scale in the horizontal direction only.
+    //
+
+    CVImage tempImage;
+
+    if ( VN_FAILED( vnCreateImage( pSrcImage.QueryFormat(), pDestImage->QueryWidth(), pSrcImage.QueryHeight(), &tempImage ) ) )
+    {
+        return vnPostError( VN_ERROR_EXECUTION_FAILURE );
+    }
+
+    //
+    // First we stretch our image in the horizontal direction
+    //
+
+    // Pixel rate is in bits per pixel; >> 3 converts to bytes per pixel.
+    UINT32 uiPixelPitch = VN_IMAGE_PIXEL_RATE( pSrcImage.QueryFormat() ) >> 3;
+
+    for ( UINT32 y = 0; y < pSrcImage.QueryHeight(); y++ )
+    {
+        UINT8 * pSrcData  = pSrcImage.QueryData() + pSrcImage.BlockOffset( 0, y );
+        UINT8 * pDestData = tempImage.QueryData() + tempImage.BlockOffset( 0, y );
+
+        if ( pSrcImage.QueryWidth() == tempImage.QueryWidth() )
+        {
+            //
+            // A simple row copy will suffice
+            //
+
+            memcpy( pDestData, pSrcData, pSrcImage.RowPitch() );
+        }
+        else
+        {
+            if ( VN_FAILED( vnBresenhamScaleLine( pSrcData, 
+                                                  pSrcImage.QueryWidth(), 
+                                                  uiPixelPitch, 
+                                                  uiPixelPitch,
+                                                  pDestData, 
+                                                  tempImage.QueryWidth(), 
+                                                  uiPixelPitch ) ) )
+            {
+                return vnPostError( VN_ERROR_EXECUTION_FAILURE );
+            }
+        }
+    }
+
+    //
+    // Check to see whether this image only required horizontal scaling. If so, simply perform a bulk copy
+    // (we're assuming there is zero undisclosed padding.)
+    //
+
+    if ( tempImage.QueryHeight() == pDestImage->QueryHeight() )
+    {
+        memcpy( pDestImage->QueryData(), tempImage.QueryData(), tempImage.SlicePitch() );
+
+        return VN_SUCCESS;
+    }
+
+    //
+    // Now scale in the vertical direction -- placing the results into our destination image
+    //
+
+    // Walking columns: the per-pixel stride is now the row pitch of each image.
+    for ( UINT32 x = 0; x < tempImage.QueryWidth(); x++ )
+    {
+        UINT8 * pSrcData  = tempImage.QueryData() + tempImage.BlockOffset( x, 0 );
+        UINT8 * pDestData = pDestImage->QueryData() + pDestImage->BlockOffset( x, 0 );
+
+        if ( VN_FAILED( vnBresenhamScaleLine( pSrcData, 
+                                              tempImage.QueryHeight(), 
+                                              tempImage.RowPitch(), 
+                                              uiPixelPitch,
+                                              pDestData, 
+                                              pDestImage->QueryHeight(), 
+                                              pDestImage->RowPitch() ) ) )
+        {
+            return vnPostError( VN_ERROR_EXECUTION_FAILURE );
+        }
+    }
+
+    return VN_SUCCESS;
+}
+
+//
+// vnScaleImage: public scale entry point (see vnImageScale.h for the full
+// contract). Validates parameters, clones when no scaling is needed, creates
+// the destination image, and performs a nearest-equivalent Bresenham scale.
+//
+VN_STATUS vnScaleImage( CONST CVImage & pSrcImage, UINT32 uiWidth, UINT32 uiHeight, CVImage * pDestImage )
+{
+    if ( VN_PARAM_CHECK )
+    {
+        if ( !VN_IS_IMAGE_VALID(pSrcImage) || 0 == uiWidth || 0 == uiHeight || !pDestImage )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    if ( uiWidth == pSrcImage.QueryWidth() && uiHeight == pSrcImage.QueryHeight() )
+    {
+        //
+        // Perform a cloning, no scaling required as our dimensions match.
+        //
+
+        return vnCloneImage( pSrcImage, pDestImage );
+    }
+
+    //
+    // Create our destination image.
+    //
+
+    if ( VN_FAILED( vnCreateImage( pSrcImage.QueryFormat(), uiWidth, uiHeight, pDestImage ) ) )
+    {
+        return vnPostError( VN_ERROR_EXECUTION_FAILURE );
+    }
+
+    //
+    // Perform our scaling operation using the aliases
+    //
+
+    if ( VN_FAILED( vnBresenhamScaleImage( pSrcImage, pDestImage ) ) )
+    {
+        return vnPostError( VN_ERROR_EXECUTION_FAILURE );
+    }
+
+    return VN_SUCCESS;
+}
diff --git a/src/libs/image-resampler/Operators/vnImageScale.h b/src/libs/image-resampler/Operators/vnImageScale.h
new file mode 100644
index 000000000..6a4835bc7
--- /dev/null
+++ b/src/libs/image-resampler/Operators/vnImageScale.h
@@ -0,0 +1,67 @@
+
+//
+// Copyright (c) 2002-2009 Joe Bertolami. All Right Reserved.
+//
+// vnImageScale.h
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Additional Information:
+//
+// For more information, visit http://www.bertolami.com.
+//
+
+#ifndef __VN_IMAGE_SCALE_H__
+#define __VN_IMAGE_SCALE_H__
+
+#include "../Base/vnBase.h"
+#include "../Base/vnMath.h"
+#include "../Base/vnImage.h"
+
+//
+// Scale Operator
+//
+// ScaleImage rescales the image to the dimensions specified. This is distinct from ResizeImage,
+// which performs a resampling operation. Scaling is much more primitive, and produces an output
+// equal to a nearest kernel resampling. The benefits of the scale operator (which internally uses
+// Bresenham's line stretching process) is that it is fast, is entirely fixed point, and avoids
+// integer division.
+//
+// Parameters:
+//
+// pSrcImage: The read-only source image to resize.
+//
+// uiWidth: The destination width to target.
+//
+// uiHeight: The destination height to target.
+//
+// pDestImage: a pointer to an image object. Upon successful return, this object will
+// contain a scaled view of the source image.
+//
+// Supported Image Formats:
+//
+// All image formats are supported.
+//
+
+VN_STATUS vnScaleImage( CONST CVImage & pSrcImage, UINT32 uiWidth, UINT32 uiHeight, CVImage * pDestImage );
+
+#endif // __VN_IMAGE_SCALE_H__
\ No newline at end of file
diff --git a/src/libs/image-resampler/Utilities/vnImageBlock.cpp b/src/libs/image-resampler/Utilities/vnImageBlock.cpp
new file mode 100644
index 000000000..6cf6dd80d
--- /dev/null
+++ b/src/libs/image-resampler/Utilities/vnImageBlock.cpp
@@ -0,0 +1,95 @@
+
+#include "vnImageBlock.h"
+
+//
+// vnConvertBlock: converts a pixel block to the precision implied by
+// destFormat (fixed <-> float), scaling channel values between the fixed
+// signed-32-bit range and the normalized float range [-1, 1]. Safe for
+// aliased calls where pSrc and pDest reference the same block.
+//
+VN_STATUS vnConvertBlock( CONST VN_PIXEL_BLOCK & pSrc, VN_IMAGE_FORMAT destFormat, VN_PIXEL_BLOCK * pDest )
+{
+    if ( VN_PARAM_CHECK )
+    {
+        if ( !pDest )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    //
+    // (!) Note: this function is written to support aliased conversion. That is, conversions where pSrc 
+    //     and pDest are aliases of the same block.
+    //
+
+    //
+    // Our next step is to copy over the block channel data with proper conversions for the 
+    // (potential) precision change.
+    //
+
+    VN_IMAGE_PRECISION destPrecision = ( VN_IS_FLOAT_FORMAT( destFormat ) ) ? VN_IMAGE_PRECISION_FLOAT : VN_IMAGE_PRECISION_FIXED;
+
+    //
+    // Note that we store the destination channel count but do not take it into consideration
+    // when performing the channel conversion. In this way we allow zero values to propagate from
+    // the source to destination in the event of a channel count mismatch.
+    //
+
+    if ( pSrc.uiPrecision == destPrecision )
+    {
+        //
+        // Same precision, simply copy over the data and format
+        //
+
+        // 32 == sizeof the channel union (4 x 64-bit channels); copies all
+        // four channels regardless of the destination channel count.
+        memcpy( pDest->uiChannelBytes, pSrc.uiChannelBytes, 32 );
+    }
+
+    //
+    // Conversion between fixed/float format
+    //
+
+    else
+    {
+        if ( VN_IMAGE_PRECISION_FLOAT == destPrecision )
+        {
+            //
+            // fixed -> float, we perform a normalizing scale
+            //
+
+            for ( UINT8 i = 0; i < 4; i++ )
+            {
+                // Negative and non-negative values are normalized against
+                // their respective range bounds, then clamped to [-1, 1].
+                if ( pSrc.iChannelData[i] < 0 )
+                {
+                    pDest->fChannelData[i] = ( (FLOAT64) -1 * pSrc.iChannelData[i] / VN_MIN_INT32 );
+                }
+                else
+                {
+                    pDest->fChannelData[i] = (FLOAT64) pSrc.iChannelData[i] / VN_MAX_INT32;
+                }
+
+                pDest->fChannelData[i] = vnClipRange64( pDest->fChannelData[i], -1.0, 1.0 );
+            }
+        }
+        else
+        {
+            //
+            // float -> fixed, we perform a truncating scale
+            //
+
+            FLOAT64 fTempChannels[4] = {0};
+
+            for ( UINT8 i = 0; i < 4; i++ )
+            {
+                fTempChannels[i] = vnClipRange64( pSrc.fChannelData[i], -1.0f, 1.0f );
+
+                if ( fTempChannels[i] < 0 )
+                {
+                    pDest->iChannelData[i] = (INT64) ( (FLOAT64) -1.0 * fTempChannels[i] * VN_MIN_INT32 );
+                }
+                else
+                {
+                    pDest->iChannelData[i] = (INT64) ( (FLOAT64) fTempChannels[i] * VN_MAX_INT32 );
+                }
+            }
+        }
+    }
+
+    pDest->uiChannelCount = VN_IMAGE_CHANNEL_COUNT( destFormat );
+    pDest->uiPrecision    = destPrecision;
+
+    return VN_SUCCESS;
+}
\ No newline at end of file
diff --git a/src/libs/image-resampler/Utilities/vnImageBlock.h b/src/libs/image-resampler/Utilities/vnImageBlock.h
new file mode 100644
index 000000000..a803bab55
--- /dev/null
+++ b/src/libs/image-resampler/Utilities/vnImageBlock.h
@@ -0,0 +1,125 @@
+
+//
+// Copyright (c) 2002-2009 Joe Bertolami. All Right Reserved.
+//
+// vnImageBlock.h
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Additional Information:
+//
+// For more information, visit http://www.bertolami.com.
+//
+#ifndef __VN_IMAGE_BLOCK_H__
+#define __VN_IMAGE_BLOCK_H__
+
+#include "../Base/vnBase.h"
+#include "../Base/vnMath.h"
+#include "../Base/vnImage.h"
+
+//
+// Image Blocks
+//
+// This image library supports a broad set of image formats that vary in terms
+// of their channel counts, pixel bit rates, precision, endian-ness, signed-ness,
+// and color space.
+//
+// As a result, we rely upon the pixel block structure to perform simple conversions
+// between our native image formats and a canonical 64 bit pixel format (which we
+// universally store in pixel blocks).
+//
+// This system allows the rest of the image library to support most image formats
+// in a very simple manner.
+//
+
+typedef struct VN_PIXEL_BLOCK
+{
+    //
+    // This structure deliberately sheds the notion of the image format. 
+    // Pixel blocks represent a common normalized pixel format that is
+    // restricted to 32 bit channel formats.
+    //
+
+    // Canonical channel storage: the same 32 bytes viewed as four 64-bit
+    // floats, four 64-bit signed ints, or raw bytes, per uiPrecision.
+    union
+    {
+        FLOAT64 fChannelData[4];
+        INT64 iChannelData[4];
+        UINT8 uiChannelBytes[32];
+    };
+
+    VN_IMAGE_PRECISION uiPrecision;     // which union view is authoritative (fixed or float)
+    UINT8 uiChannelCount;               // channel count of the originating format
+
+} VN_PIXEL_BLOCK;
+
+//
+// (!) Notes on precision
+//
+// Although we internally store our pixel data as 256 bit signed precision,
+// we only use the low 32 bits of each pixel block channel when packing and
+// unpacking the data.
+//
+// Thus, it is the responsibility of operator designers to ensure that pixel
+// block data is within this valid (signed or unsigned) range prior to packing
+// it into an image structure.
+//
+// We internally manage pixels at 256 bit precision to enable large summations
+// within kernels without overflowing our integer storage, and we store pixels
+// internally in a signed format so that kernels may use them to directly perform
+// signed arithmetic.
+//
+// * A slight loss of precision will occur when packing/unpacking 32 bpp
+// unsigned images. This trade-off was made in consideration of performance
+// issues.
+//
+// When converting between precision formats, a couple of conversion rules may
+// apply:
+//
+// o: When converting fixed->fixed of different precisions, the values are scaled
+// to ensure that the conversion maintains the integrity of the values as a
+// percentage of the total viable range.
+//
+// o: When converting float->fixed, the source values are saturated to [-1,1].
+//
+// o: When converting fixed->float, the entire fixed integer range is packed within
+// the float range of [-1, 1].
+//
+// All integer values are represented internally as signed values.
+//
+// Note also that an implicit precision conversion may be performed when packing a
+// pixel block into a raw buffer if the destination buffer precision does not match the
+// source block precision.
+//
+
+VN_STATUS vnConvertToBlock( UINT8 * pRawPixel, VN_IMAGE_FORMAT internalFormat, VN_PIXEL_BLOCK * pOutBlock );
+
+VN_STATUS vnConvertFromBlock( CONST VN_PIXEL_BLOCK & pSourceBlock, VN_IMAGE_FORMAT format, UINT8 * pRawPixel );
+
+//
+// vnConvertBlock
+//
+// Performs a block conversion based on a destination format.
+//
+
+VN_STATUS vnConvertBlock( CONST VN_PIXEL_BLOCK & pSrc, VN_IMAGE_FORMAT destFormat, VN_PIXEL_BLOCK * pDest );
+
+#endif // __VN_IMAGE_BLOCK_H__
diff --git a/src/libs/image-resampler/Utilities/vnImageBlockPack.cpp b/src/libs/image-resampler/Utilities/vnImageBlockPack.cpp
new file mode 100644
index 000000000..7c68d2c17
--- /dev/null
+++ b/src/libs/image-resampler/Utilities/vnImageBlockPack.cpp
@@ -0,0 +1,228 @@
+
+#include "vnImageBlock.h"
+
+//
+// vnPackFloatChannel: narrows a 64 bit float channel value into a 16 or 32
+// bit float slot at pRawChannel. uiChannelRate is the destination width in
+// bits; only 16 and 32 are supported.
+//
+VN_STATUS vnPackFloatChannel( CONST FLOAT64 & pInChannel, UINT8 uiChannelRate, UINT8 * pRawChannel )
+{
+    //
+    // This parameter check is exhaustive, since this function is static and will not presently
+    // be called with unchecked parameters, but we redundantly validate the parameters in case
+    // this design changes in the future.
+    //
+
+    if ( VN_PARAM_CHECK )
+    {
+        if ( 0 == uiChannelRate || !pRawChannel )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    //
+    // This function is responsible for writing a 64 bit float value and packing it into a 16 or 
+    // 32 bit float value for output.
+    //
+
+    switch ( uiChannelRate )
+    {
+        // The 16-bit case relies on FLOAT16's assignment from a 32-bit float.
+        case 16: *( (FLOAT16 *) pRawChannel ) = (FLOAT32) pInChannel; break;
+        case 32: *( (FLOAT32 *) pRawChannel ) = (FLOAT32) pInChannel; break;
+
+        //
+        // We only support 16 or 32 bit floating point types.
+        //
+
+        default: return vnPostError( VN_ERROR_EXECUTION_FAILURE );
+
+    }
+
+    return VN_SUCCESS;
+}
+
+//
+// vnPackFixedChannel: rescales a canonical signed channel value (clamped to
+// the signed/unsigned 32 bit range) into a uiChannelRate-bit field and merges
+// it into *pRawData at bit offset uiSumOffset. bSigned selects between the
+// signed and unsigned destination range.
+//
+VN_STATUS vnPackFixedChannel( INT64 pInChannel, BOOL bSigned, UINT8 uiChannelRate, UINT32 uiSumOffset, UINT64 * pRawData )
+{
+    //
+    // (!) Note: We are expecting a logical shift here, and further expect that a shift 
+    //     by 64 will produce a zero value. Furthermore, it is crucial that we cast our
+    //     shift operation to 64 bit precision, otherwise the compiler will infer the precision
+    //     from the operands which will fault our 128 bit pixel support.
+    //
+
+    UINT64 uiMaskChannelVal = ( (UINT64) 1 << uiChannelRate ) - 1;      // bit mask for our destination channel
+    UINT64 uiMaxChannelVal  = uiMaskChannelVal;                         // max unsigned value
+    INT64 iMaxChannelVal    = (INT64) ( uiMaxChannelVal >> 1 );         // max signed value
+    INT64 iMinChannelVal    = -iMaxChannelVal - 1;                      // min signed value
+
+    //
+    // If we are a signed format, negative values will be written naturally into the packed 
+    // buffer. If we are unsigned however, we must clamp to a valid unsigned range.
+    //
+
+    INT64 iChannelData = 0;
+
+    if ( bSigned )
+    {
+        // Clamp to the canonical 32-bit signed range, then rescale into the
+        // destination channel range (negative and positive halves separately).
+        if ( pInChannel < VN_MIN_INT32 ) pInChannel = VN_MIN_INT32;
+        if ( pInChannel > VN_MAX_INT32 ) pInChannel = VN_MAX_INT32;
+
+        if ( pInChannel < 0 )
+        {
+            iChannelData = (INT64) iMinChannelVal * pInChannel / VN_MIN_INT32;
+        }
+        else
+        {
+            iChannelData = (INT64) iMaxChannelVal * pInChannel / VN_MAX_INT32;
+        }
+    }
+    else
+    {
+        if ( pInChannel < 0 ) pInChannel = 0;
+        if ( pInChannel > VN_MAX_INT32 ) pInChannel = VN_MAX_INT32;
+
+        iChannelData = (INT64) uiMaxChannelVal * pInChannel / VN_MAX_INT32;
+    }
+
+    UINT64 uiShiftedValue = (UINT64) ( iChannelData & uiMaskChannelVal ) << uiSumOffset;
+
+    //
+    // Next we insert our shifted value into the pRawData stream by first clearing 
+    // the current destination bits in the stream, and then placing our updated 
+    // value.
+    //
+
+    (*pRawData) = (*pRawData) & ~( uiMaskChannelVal << uiSumOffset );
+
+    (*pRawData) = (*pRawData) | uiShiftedValue;
+
+    return VN_SUCCESS;
+}
+
+//
+// vnPackFloatPixel: packs every channel of a float-precision pixel block into
+// the raw pixel buffer, advancing by each channel's byte width.
+//
+VN_STATUS vnPackFloatPixel( CONST VN_PIXEL_BLOCK & pSourceBlock, VN_IMAGE_FORMAT format, UINT8 * pRawPixel )
+{
+    // (!) For float formats the summation offset is a *byte* offset, unlike
+    //     the bit offset used in vnPackFixedPixel.
+    UINT32 uiSumOffset   = 0;
+    UINT8 uiChannelCount = VN_IMAGE_CHANNEL_COUNT( format );
+    UINT8 uiPixelRate    = VN_IMAGE_PIXEL_RATE( format );       // NOTE(review): unused in this function
+
+    for ( UINT32 i = 0; i < uiChannelCount; i++ )
+    {
+        UINT8 uiRate        = VN_IMAGE_CHANNEL_RATE( i, format );
+        UINT8 * pRawChannel = pRawPixel + uiSumOffset;
+
+        //
+        // Convert the type (if necessary), and safeguard against invalid formats.
+        //
+
+        if ( VN_FAILED( vnPackFloatChannel( pSourceBlock.fChannelData[i], uiRate, pRawChannel ) ) )
+        {
+            return vnPostError( VN_ERROR_EXECUTION_FAILURE );
+        }
+
+        uiSumOffset += ( uiRate >> 3 );
+    }
+
+    return VN_SUCCESS;
+}
+
+//
+// vnPackFixedPixel: packs every channel of a fixed-precision pixel block into
+// the raw pixel buffer, accumulating channel bits in a 64 bit staging word
+// and flushing it to the output in 4 byte batches.
+//
+VN_STATUS vnPackFixedPixel( CONST VN_PIXEL_BLOCK & pSourceBlock, VN_IMAGE_FORMAT format, UINT8 * pRawPixel )
+{
+    //
+    // (!) Note: our summation offset is a *bit* offset for fixed channels, not
+    //     a byte offset like we use for float channels.
+    //
+
+    UINT64 uiRawData     = 0;
+    UINT32 uiSumOffset   = 0;
+    UINT8 uiChannelCount = VN_IMAGE_CHANNEL_COUNT( format );
+    UINT8 uiPixelRate    = VN_IMAGE_PIXEL_RATE( format );       // NOTE(review): unused in this function
+
+    for ( UINT32 i = 0; i < uiChannelCount; i++ )
+    {
+        UINT8 uiChannelRate = VN_IMAGE_CHANNEL_RATE( i, format );
+
+        //
+        // The sum of all channel rates of a pixel must be byte aligned, with a maximum 
+        // of 8 bytes per pixel. Additionally, our individual channels are limited to 
+        // 32 bits. 
+        //
+        // We therefore write our pixel data to the output in batches of 32 or fewer bits.
+        //
+
+        if ( VN_FAILED( vnPackFixedChannel( pSourceBlock.iChannelData[i], VN_IS_SIGNED_FORMAT( format ), uiChannelRate, uiSumOffset, &uiRawData ) ) )
+        {
+            return vnPostError( VN_ERROR_EXECUTION_FAILURE );
+        }
+
+        uiSumOffset += uiChannelRate;
+
+        //
+        // If we've packed 32 or more bits so far, we write out 4 bytes and shift our counters. 
+        // This will enable us to pack and store more than 64 bits per pixel.
+        //
+
+        if ( uiSumOffset >= 32 )
+        {
+            //
+            // Copy out four bytes and advance our counters.
+            //
+
+            memcpy( pRawPixel, &uiRawData, 4 );
+
+            uiSumOffset = ( uiSumOffset % 32 );
+            uiRawData   = (UINT64) uiRawData >> 32;
+            pRawPixel   = pRawPixel + 4;
+        }
+    }
+
+    //
+    // If there are any remaining pixels in our raw data buffer, they must be 
+    // byte aligned.
+    //
+
+    // uiSumOffset is a multiple of 8 here (pixel rates are byte aligned), so
+    // >> 3 yields the exact remaining byte count.
+    if ( uiSumOffset )
+    {
+        memcpy( pRawPixel, &uiRawData, ( uiSumOffset >> 3 ) );
+    }
+
+    return VN_SUCCESS;
+}
+
+//
+// vnConvertFromBlock: packs a pixel block into a raw pixel buffer laid out
+// according to format, implicitly converting the block's precision first if
+// it does not match the format's precision.
+//
+VN_STATUS vnConvertFromBlock( CONST VN_PIXEL_BLOCK & pSourceBlock, VN_IMAGE_FORMAT format, UINT8 * pRawPixel )
+{
+    if ( VN_PARAM_CHECK )
+    {
+        if ( !pRawPixel || !VN_IS_FORMAT_VALID( format ) )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    //
+    // Check to verify that the precision matches between our source block and the destination 
+    // format. This function does not perform precision conversion (it is assumed to have already 
+    // been performed).
+    //
+
+    BOOL bFloatFormat = !!( VN_IS_FLOAT_FORMAT( format ) );
+
+    VN_IMAGE_PRECISION formatPrecision = bFloatFormat ? VN_IMAGE_PRECISION_FLOAT : VN_IMAGE_PRECISION_FIXED;
+
+    if ( formatPrecision != pSourceBlock.uiPrecision )
+    {
+        //
+        // Perform an on-the-fly block conversion to match the required precision.
+        //
+
+        VN_PIXEL_BLOCK tempBlock;
+
+        vnConvertBlock( pSourceBlock, format, &tempBlock );
+
+        if ( bFloatFormat ) return vnPackFloatPixel( tempBlock, format, pRawPixel );
+        else                return vnPackFixedPixel( tempBlock, format, pRawPixel );
+    }
+    else
+    {
+        if ( bFloatFormat ) return vnPackFloatPixel( pSourceBlock, format, pRawPixel );
+        else                return vnPackFixedPixel( pSourceBlock, format, pRawPixel );
+    }
+
+    // Unreachable: both branches above return.
+    return VN_SUCCESS;
+}
\ No newline at end of file
diff --git a/src/libs/image-resampler/Utilities/vnImageBlockUnpack.cpp b/src/libs/image-resampler/Utilities/vnImageBlockUnpack.cpp
new file mode 100644
index 000000000..0bc62995c
--- /dev/null
+++ b/src/libs/image-resampler/Utilities/vnImageBlockUnpack.cpp
@@ -0,0 +1,245 @@
+
+#include "vnImageBlock.h"
+
+VN_STATUS vnUnpackFloatChannel( UINT8 uiChannelRate, UINT8 * pRawChannel, FLOAT64 * pOutChannel )
+{
+    //
+    // Unpacks a single 16 or 32 bit floating point channel from a raw pixel buffer and
+    // widens it into a 64 bit float output.
+    //
+    // This parameter check is exhaustive, since this function is static and will not presently
+    // be called with unchecked parameters, but we redundantly validate the parameters in case
+    // this design changes in the future.
+    //
+
+    if ( VN_PARAM_CHECK )
+    {
+        //
+        // (Fix) pRawChannel was previously unvalidated, which allowed a null dereference
+        //       below despite the claim of an exhaustive check.
+        //
+
+        if ( 0 == uiChannelRate || !pRawChannel || !pOutChannel )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    //
+    // This function unpacks a 16 or 32 bit float value, and stores it into a 64 bit float output.
+    //
+    // (Fix) We copy the raw bytes into a properly aligned local before reading, rather than
+    //       dereferencing a reinterpreted pointer -- the raw channel may be misaligned, and
+    //       the casted access violated strict aliasing.
+    //
+
+    switch ( uiChannelRate )
+    {
+        case 16:
+        {
+            FLOAT16 half;
+
+            memcpy( &half, pRawChannel, sizeof( half ) );
+
+            (*pOutChannel) = (FLOAT64) FLOAT16::ToFloat32( half );
+
+        } break;
+
+        case 32:
+        {
+            FLOAT32 single;
+
+            memcpy( &single, pRawChannel, sizeof( single ) );
+
+            (*pOutChannel) = (FLOAT64) single;
+
+        } break;
+
+        //
+        // We only support 16 or 32 bit floating point types.
+        //
+
+        default: return vnPostError( VN_ERROR_EXECUTION_FAILURE );
+
+    }
+
+    return VN_SUCCESS;
+}
+
+VN_STATUS vnUnpackFixedChannel( UINT8 uiChannelRate, BOOL bSigned, UINT64 uiRawData, UINT64 * pOutChannel )
+{
+    //
+    // Unpacks a fixed point channel of uiChannelRate bits from uiRawData and normalizes it
+    // into the lower 32 bits of the 64 bit output: signed channels are scaled onto the full
+    // [VN_MIN_INT32, VN_MAX_INT32] range, unsigned channels onto [0, VN_MAX_INT32].
+    //
+    // This parameter check is exhaustive, since this function is static and will not presently
+    // be called with unchecked parameters, but we redundantly validate the parameters in case
+    // this design changes in the future.
+    //
+
+    if ( VN_PARAM_CHECK )
+    {
+        if ( 0 == uiChannelRate || !pOutChannel )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    //
+    // (Fix) Shifting a 64 bit value by 64 or more is undefined behavior in C++ -- the
+    //       previous code explicitly relied on a shift-by-64 producing zero, which the
+    //       language does not guarantee. We compute the full mask explicitly instead.
+    //
+
+    UINT64 uiMaskChannelVal = ( uiChannelRate >= 64 ) ? ~( (UINT64) 0 )
+                                                      : ( ( (UINT64) 1 << uiChannelRate ) - 1 ); // bit mask for our channel
+
+    UINT64 uiMaxChannelVal  = uiMaskChannelVal;                 // max unsigned value
+    INT64  iMaxChannelVal   = (INT64) ( uiMaxChannelVal >> 1 ); // max signed value
+    INT64  iMinChannelVal   = -iMaxChannelVal - 1;              // min signed value
+
+    //
+    // This function unpacks fixed integer values and stores them in the lower
+    // 32 bits of a signed 64 bit output.
+    //
+
+    if ( bSigned )
+    {
+        INT64 iRawData = (INT64) uiRawData;
+
+        //
+        // First we check our sign bit, and perform a sign extension if necessary.
+        //
+
+        if ( iRawData & ( (INT64) 1 << ( uiChannelRate - 1 ) ) )
+        {
+            iRawData |= ~uiMaskChannelVal;
+        }
+
+        //
+        // Scale negative and positive values separately so that the channel minimum and
+        // maximum map exactly onto VN_MIN_INT32 and VN_MAX_INT32.
+        //
+
+        if ( iRawData < 0 )
+        {
+            (*pOutChannel) = (INT64) VN_MIN_INT32 * iRawData / iMinChannelVal;
+        }
+        else
+        {
+            (*pOutChannel) = (INT64) VN_MAX_INT32 * iRawData / iMaxChannelVal;
+        }
+    }
+
+    //
+    // Otherwise, handle the unsigned channel case
+    //
+
+    else
+    {
+        (*pOutChannel) = (INT64) VN_MAX_INT32 * uiRawData / uiMaxChannelVal;
+    }
+
+    return VN_SUCCESS;
+}
+
+VN_STATUS vnUnpackFloatPixel( UINT8 * pRawPixel, VN_IMAGE_FORMAT format, VN_PIXEL_BLOCK * pOutBlock )
+{
+    //
+    // Unpacks a floating point pixel of the given format into a pixel block, converting
+    // each channel to 64 bit float precision. Float channels are always byte aligned, so
+    // the running offset below is tracked in bytes.
+    //
+
+    if ( VN_PARAM_CHECK )
+    {
+        if ( !pRawPixel || !VN_IS_FORMAT_VALID( format ) || !pOutBlock )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    //
+    // (Fix) Removed the unused local uiPixelRate -- it is only needed by the fixed
+    //       point unpack path.
+    //
+
+    UINT8  uiChannelCount = VN_IMAGE_CHANNEL_COUNT( format );
+    UINT32 uiSumOffset    = 0;
+
+    for ( UINT32 i = 0; i < uiChannelCount; i++ )
+    {
+        UINT8   uiRate      = VN_IMAGE_CHANNEL_RATE( i, format );
+        UINT8 * pRawChannel = pRawPixel + uiSumOffset;
+
+        //
+        // Convert the type (if necessary), and safeguard against invalid formats.
+        //
+
+        if ( VN_FAILED( vnUnpackFloatChannel( uiRate, pRawChannel, &(pOutBlock->fChannelData[i]) ) ) )
+        {
+            return vnPostError( VN_ERROR_EXECUTION_FAILURE );
+        }
+
+        uiSumOffset += ( uiRate >> 3 );
+    }
+
+    pOutBlock->uiPrecision   = VN_IMAGE_PRECISION_FLOAT;
+    pOutBlock->uiChannelCount = uiChannelCount;
+
+    return VN_SUCCESS;
+}
+
+VN_STATUS vnUnpackFixedPixel( UINT8 * pRawPixel, VN_IMAGE_FORMAT format, VN_PIXEL_BLOCK * pOutBlock )
+{
+    //
+    // Unpacks a fixed point pixel of the given format into a pixel block. Channels may
+    // have heterogeneous bit rates and need not be byte aligned, so a sliding bit window
+    // is walked across the raw pixel.
+    //
+
+    if ( VN_PARAM_CHECK )
+    {
+        if ( !pRawPixel || !VN_IS_FORMAT_VALID( format ) || !pOutBlock )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    UINT8 uiPixelRate = VN_IMAGE_PIXEL_RATE( format );       // total bits per pixel
+    UINT8 uiChannelCount = VN_IMAGE_CHANNEL_COUNT( format ); // channels per pixel
+
+    //
+    // (!) Note: our summation offset is a *bit* offset for fixed channels, not
+    //     a byte offset like we use for float channels.
+    //
+
+    UINT32 uiSumOffset = 0;
+
+    for ( UINT32 i = 0; i < uiChannelCount; i++ )
+    {
+        UINT64 uiRawData = 0;
+        UINT32 uiBytesRead = ( uiSumOffset >> 3 );                // whole bytes already consumed
+        UINT8 uiChannelRate = VN_IMAGE_CHANNEL_RATE( i, format );
+
+        //
+        // The maximum allowable size per channel is currently 32 bits. We
+        // move a dynamic sliding window along the pixel buffer and select
+        // the appropriate bits for each channel. We read at most 8 bytes here,
+        // clamped to the bytes remaining in the pixel.
+        //
+
+        memcpy( &uiRawData, pRawPixel + uiBytesRead, VN_MIN2( 8, ( uiPixelRate >> 3 ) - uiBytesRead ) );
+
+        //
+        // Each channel may contain, at most, 8 bytes of data, so we temporarily
+        // store channel data in a 64 bit unsigned integer, and select the bits
+        // that are relevant for this channel. The ( uiSumOffset % 8 ) term discards
+        // the sub-byte bits already consumed by the previous channel.
+        //
+
+        //
+        // (!) Note: it is crucial that we cast our shift operation to 64 bit precision, otherwise
+        //     the compiler will infer the precision from the operands which will fault our
+        //     128 bit pixel support.
+        //
+
+        uiRawData = ( uiRawData >> ( uiSumOffset % 8 ) ) & ( ( (UINT64) 1 << uiChannelRate ) - 1 );
+
+        //
+        // Convert the type (if necessary), and safeguard against invalid formats.
+        //
+
+        if ( VN_FAILED( vnUnpackFixedChannel( uiChannelRate, VN_IS_SIGNED_FORMAT( format ), uiRawData, (UINT64 *) &( pOutBlock->iChannelData[i] ) ) ) )
+        {
+            return vnPostError( VN_ERROR_EXECUTION_FAILURE );
+        }
+
+        uiSumOffset += uiChannelRate;
+    }
+
+    pOutBlock->uiPrecision = VN_IMAGE_PRECISION_FIXED;
+    pOutBlock->uiChannelCount = uiChannelCount;
+
+    return VN_SUCCESS;
+}
+
+VN_STATUS vnConvertToBlock( UINT8 * pRawPixel, VN_IMAGE_FORMAT internalFormat, VN_PIXEL_BLOCK * pOutBlock )
+{
+    //
+    // Unpacks a raw pixel of the given format into a pixel block, dispatching to the
+    // float or fixed point unpack path based on the format's precision.
+    //
+
+    if ( VN_PARAM_CHECK )
+    {
+        if ( !pRawPixel || !VN_IS_FORMAT_VALID( internalFormat ) || !pOutBlock )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    //
+    // The design of this function aimed at balancing simplicity and ease of readability, with performance. We try
+    // to push as many branches out as possible (with the channel inner branch a necessity due to formats with
+    // heterogeneous channel rates).
+    //
+    // (Fix) Removed the unreachable trailing return -- both branches below return.
+    //
+
+    BOOL bFloatFormat = !!( VN_IS_FLOAT_FORMAT( internalFormat ) );
+
+    if ( bFloatFormat ) return vnUnpackFloatPixel( pRawPixel, internalFormat, pOutBlock );
+    else                return vnUnpackFixedPixel( pRawPixel, internalFormat, pOutBlock );
+}
diff --git a/src/libs/image-resampler/Utilities/vnImageSampler.cpp b/src/libs/image-resampler/Utilities/vnImageSampler.cpp
new file mode 100644
index 000000000..91ac487ff
--- /dev/null
+++ b/src/libs/image-resampler/Utilities/vnImageSampler.cpp
@@ -0,0 +1,99 @@
+
+#include "vnImageSampler.h"
+#include "vnImageBlock.h"
+
+VN_STATUS vnSampleImage( CONST CVImage & pSrcImage, 
+                         VN_IMAGE_KERNEL_TYPE uiKernel, 
+                         VN_IMAGE_KERNEL_DIRECTION uiDirection, 
+                         FLOAT32 fX, 
+                         FLOAT32 fY, 
+                         FLOAT32 fRadius, 
+                         UINT8 * pRawOutput )
+{
+    //
+    // Samples the source image at (fX, fY) with the requested kernel and direction,
+    // writing the result (in the source image's format) to pRawOutput.
+    //
+
+    if ( VN_PARAM_CHECK )
+    {
+        if ( !VN_IS_IMAGE_VALID( pSrcImage ) || !pRawOutput )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+
+        if ( fX >= pSrcImage.QueryWidth() || fY >= pSrcImage.QueryHeight() )
+        {
+            return vnPostError( VN_ERROR_INVALIDARG );
+        }
+    }
+
+    //
+    // Most of our kernels are internally separable, but care must be taken when writing operators
+    // to ensure that we use the appropriate kernel and mode. Not all of our kernels are separable, 
+    // even though they support a 1D interface.
+    //
+
+    switch ( uiDirection )
+    {
+        default: break;
+
+        case VN_IMAGE_KERNEL_1D_HORIZONTAL:
+        case VN_IMAGE_KERNEL_1D_VERTICAL:
+        {
+            //
+            // Here we safeguard against error if our enum should shift or expand. This could 
+            // be done with a LUT and better parameterization, but we'd like to keep this as 
+            // simple and readable as possible.
+            //
+
+            // TRUE selects the vertical pass, FALSE the horizontal pass.
+
+            BOOL bDirection = ( VN_IMAGE_KERNEL_1D_VERTICAL == uiDirection );
+
+            switch ( uiKernel )
+            {
+                default: break;
+
+                case VN_IMAGE_KERNEL_NEAREST:  return vnNearestKernel( pSrcImage, fX, fY, pRawOutput );
+                case VN_IMAGE_KERNEL_AVERAGE:  return vnAverageKernel( pSrcImage, fX, fY, bDirection, fRadius, pRawOutput );
+                case VN_IMAGE_KERNEL_BILINEAR: return vnBilinearKernel( pSrcImage, fX, fY, bDirection, pRawOutput );
+                case VN_IMAGE_KERNEL_GAUSSIAN: return vnGaussianKernel( pSrcImage, fX, fY, bDirection, fRadius, pRawOutput );
+                case VN_IMAGE_KERNEL_BICUBIC:  return vnBicubicKernel( pSrcImage, 0, 1, fX, fY, bDirection, pRawOutput );
+                case VN_IMAGE_KERNEL_CATMULL:  return vnBicubicKernel( pSrcImage, 0, 0.5f, fX, fY, bDirection, pRawOutput );
+                case VN_IMAGE_KERNEL_MITCHELL: return vnBicubicKernel( pSrcImage, 1.0f/3.0f, 1.0f/3.0f, fX, fY, bDirection, pRawOutput );
+                case VN_IMAGE_KERNEL_CARDINAL: return vnBicubicKernel( pSrcImage, 0, 0.75f, fX, fY, bDirection, pRawOutput );
+                case VN_IMAGE_KERNEL_BSPLINE:  return vnBicubicKernel( pSrcImage, 1, 0, fX, fY, bDirection, pRawOutput );
+                case VN_IMAGE_KERNEL_SPLINE:   return vnSplineKernel( pSrcImage, fX, fY, bDirection, pRawOutput );
+                case VN_IMAGE_KERNEL_LANCZOS:  return vnLanczosKernel( pSrcImage, 1, fX, fY, bDirection, pRawOutput );
+                case VN_IMAGE_KERNEL_LANCZOS2: return vnLanczosKernel( pSrcImage, 2, fX, fY, bDirection, pRawOutput );
+                case VN_IMAGE_KERNEL_LANCZOS3: return vnLanczosKernel( pSrcImage, 3, fX, fY, bDirection, pRawOutput );
+                case VN_IMAGE_KERNEL_LANCZOS4: return vnLanczosKernel( pSrcImage, 4, fX, fY, bDirection, pRawOutput );
+                case VN_IMAGE_KERNEL_LANCZOS5: return vnLanczosKernel( pSrcImage, 5, fX, fY, bDirection, pRawOutput );
+                case VN_IMAGE_KERNEL_COVERAGE: return vnCoverageKernel( pSrcImage, fX, fY, bDirection, fRadius, pRawOutput );
+            }
+
+        } break;
+
+        case VN_IMAGE_KERNEL_2D_COMBINED:
+        {
+            switch ( uiKernel )
+            {
+                default: break;
+
+                case VN_IMAGE_KERNEL_NEAREST:  return vnNearestKernel( pSrcImage, fX, fY, pRawOutput );
+                case VN_IMAGE_KERNEL_AVERAGE:  return vnAverageKernel( pSrcImage, fX, fY, fRadius, pRawOutput );
+                case VN_IMAGE_KERNEL_BILINEAR: return vnBilinearKernel( pSrcImage, fX, fY, pRawOutput );
+                case VN_IMAGE_KERNEL_GAUSSIAN: return vnGaussianKernel( pSrcImage, fX, fY, fRadius, pRawOutput );
+                case VN_IMAGE_KERNEL_BICUBIC:  return vnBicubicKernel( pSrcImage, 0, 1, fX, fY, pRawOutput );
+                case VN_IMAGE_KERNEL_CATMULL:  return vnBicubicKernel( pSrcImage, 0, 0.5f, fX, fY, pRawOutput );
+                case VN_IMAGE_KERNEL_MITCHELL: return vnBicubicKernel( pSrcImage, 1.0f/3.0f, 1.0f/3.0f, fX, fY, pRawOutput );
+                case VN_IMAGE_KERNEL_CARDINAL: return vnBicubicKernel( pSrcImage, 0, 0.75f, fX, fY, pRawOutput );
+                case VN_IMAGE_KERNEL_BSPLINE:  return vnBicubicKernel( pSrcImage, 1, 0, fX, fY, pRawOutput );
+                case VN_IMAGE_KERNEL_SPLINE:   return vnSplineKernel( pSrcImage, fX, fY, pRawOutput );
+                case VN_IMAGE_KERNEL_LANCZOS:  return vnLanczosKernel( pSrcImage, 1, fX, fY, pRawOutput );
+                case VN_IMAGE_KERNEL_LANCZOS2: return vnLanczosKernel( pSrcImage, 2, fX, fY, pRawOutput );
+                case VN_IMAGE_KERNEL_LANCZOS3: return vnLanczosKernel( pSrcImage, 3, fX, fY, pRawOutput );
+                case VN_IMAGE_KERNEL_LANCZOS4: return vnLanczosKernel( pSrcImage, 4, fX, fY, pRawOutput );
+                case VN_IMAGE_KERNEL_LANCZOS5: return vnLanczosKernel( pSrcImage, 5, fX, fY, pRawOutput );
+                case VN_IMAGE_KERNEL_COVERAGE: return vnCoverageKernel( pSrcImage, fX, fY, fRadius, pRawOutput );
+            }
+
+        } break;
+    }
+
+    //
+    // (Fix) Route the not-implemented fallthrough through vnPostError, consistent with
+    //       every other error path in this library.
+    //
+
+    return vnPostError( VN_ERROR_NOTIMPL );
+}
\ No newline at end of file
diff --git a/src/libs/image-resampler/Utilities/vnImageSampler.h b/src/libs/image-resampler/Utilities/vnImageSampler.h
new file mode 100644
index 000000000..00a47b252
--- /dev/null
+++ b/src/libs/image-resampler/Utilities/vnImageSampler.h
@@ -0,0 +1,231 @@
+
+//
+// Copyright (c) 2002-2009 Joe Bertolami. All Right Reserved.
+//
+// vnImageSampler.h
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Description:
+//
+// Image Sampler is a function that can be used to sample a single pixel in an image. It supports
+// both single and double pass sampling of 1D and 2D data sets.
+//
+// Additional Information:
+//
+// For more information, visit http://www.bertolami.com.
+//
+
+#ifndef __VN_IMAGE_SAMPLE_H__
+#define __VN_IMAGE_SAMPLE_H__
+
+#include "../Base/vnBase.h"
+#include "../Base/vnMath.h"
+#include "../Base/vnImage.h"
+#include "../Base/vnImageFormat.h"
+
+//
+// Supported filter set
+//
+
+#include "../Kernels/vnImageGaussian.h"
+#include "../Kernels/vnImageAverage.h"
+#include "../Kernels/vnImageBilinear.h"
+#include "../Kernels/vnImageNearest.h"
+#include "../Kernels/vnImageBicubic.h"
+#include "../Kernels/vnImageSpline.h"
+#include "../Kernels/vnImageLanczos.h"
+#include "../Kernels/vnImageCoverage.h"
+
+//
+// Image Sampler
+//
+// Within the image filter layer, the image sampler is the nexus between the image
+// operators, and the kernels, as shown below. The image sampler, in many cases, allows the
+// operators to be filter agnostic and support a wide variety of kernels.
+//
+// Image Operators Caveats:
+//
+// | 1. Care must be taken when using the sampler within an
+// +-------|-------+ operator. Certain kernels are non-separable and will
+// \ | / *not* fail if asked to filter a 1D data set, but will
+// \ | / not produce accurate results if applied to an image in
+// \ | / a separable manner.
+// \ | /
+// ---|--- 2. Sampling with these kernels is very simple, and does not
+// V incorporate a number of important features like sub-pixel
+// estimation. These features are handled by the higher level
+// Sampler operators.
+//
+// ^ 3. Format support is decided on a per-kernel basis. Thus,
+// ---|--- kernels are not required to support all formats. Pay special
+// / | \ attention to return codes, as they may indicate an invalid
+// / | \ sample request.
+// / | \
+// / | \
+// +-------|-------+
+// |
+//
+// Image Kernels
+//
+
+// Packs a kernel identifier: bit 31 stores the separability flag (sep), while the
+// low 31 bits store the kernel index. vnIsSeparableKernel tests bit 31.
+#define VN_IMAGE_MAKE_KERNEL( sep, index )     ( ( (sep) << 0x1F ) | ( ( index ) & 0x7FFFFFFF ) )
+
+//
+// (!) Note: the individual header files for each of the following filters contain useful
+// information for selecting a particular filter for an operation.
+//
+
+enum VN_IMAGE_KERNEL_TYPE
+{
+    // Each value is built with VN_IMAGE_MAKE_KERNEL: the first argument (bit 31) marks
+    // the kernel as separable, the second is the kernel index in the low 31 bits.
+
+    //
+    // Linear family
+    //
+
+    VN_IMAGE_KERNEL_NEAREST       = VN_IMAGE_MAKE_KERNEL( 1, 0 ),      // nearest neighbor
+    VN_IMAGE_KERNEL_AVERAGE       = VN_IMAGE_MAKE_KERNEL( 1, 1 ),      // simple averaging over the kernel space
+    VN_IMAGE_KERNEL_BILINEAR      = VN_IMAGE_MAKE_KERNEL( 1, 2 ),      // bilinear interpolation
+
+    //
+    // Mitchell-Netravali Cubic family
+    //
+    // (NOTE(review): index 3 is unused -- presumably reserved; confirm before reusing.)
+    //
+
+    VN_IMAGE_KERNEL_BICUBIC       = VN_IMAGE_MAKE_KERNEL( 1, 4 ),      // generic bicubic kernel
+    VN_IMAGE_KERNEL_CATMULL       = VN_IMAGE_MAKE_KERNEL( 1, 5 ),      // Catmull-Rom spline, popularized by GIMP
+    VN_IMAGE_KERNEL_MITCHELL      = VN_IMAGE_MAKE_KERNEL( 1, 6 ),      // cubic mitchell-netravali kernel
+    VN_IMAGE_KERNEL_CARDINAL      = VN_IMAGE_MAKE_KERNEL( 1, 7 ),      // cubic, popularized by Adobe Photoshop
+    VN_IMAGE_KERNEL_SPLINE        = VN_IMAGE_MAKE_KERNEL( 0, 8 ),      // cubic spline (non-separable)
+    VN_IMAGE_KERNEL_BSPLINE       = VN_IMAGE_MAKE_KERNEL( 1, 9 ),      // cubic b-spline, popularized by Paint.NET
+
+    //
+    // Non-separable cubic family
+    //
+
+    VN_IMAGE_KERNEL_LANCZOS       = VN_IMAGE_MAKE_KERNEL( 0, 10 ),     // lanczos-1
+    VN_IMAGE_KERNEL_LANCZOS2      = VN_IMAGE_MAKE_KERNEL( 0, 11 ),     // lanczos-2
+    VN_IMAGE_KERNEL_LANCZOS3      = VN_IMAGE_MAKE_KERNEL( 0, 12 ),     // lanczos-3
+    VN_IMAGE_KERNEL_LANCZOS4      = VN_IMAGE_MAKE_KERNEL( 0, 13 ),     // lanczos-4
+    VN_IMAGE_KERNEL_LANCZOS5      = VN_IMAGE_MAKE_KERNEL( 0, 14 ),     // lanczos-5
+
+    //
+    // Distribution family
+    //
+
+    VN_IMAGE_KERNEL_BOKEH         = VN_IMAGE_MAKE_KERNEL( 0, 15 ),     // bokeh
+    VN_IMAGE_KERNEL_GAUSSIAN      = VN_IMAGE_MAKE_KERNEL( 1, 16 ),     // gaussian interpolation
+    VN_IMAGE_KERNEL_COVERAGE      = VN_IMAGE_MAKE_KERNEL( 1, 17 ),     // coverage kernel
+
+    // ...
+
+    // Sentinel; pins the enum's underlying storage to at least 32 bits.
+    VN_IMAGE_KERNEL_TYPE_DWORD    = 0x7FFFFFFF
+};
+
+//
+// Kernel Directions
+//
+// Some kernels are separable, and others are not. For those that are separable,
+// it can be faster to perform two 1D passes rather than a single 2D pass. Each
+// separable kernel will identify itself as such in the comments above the kernel
+// interface.
+//
+
+enum VN_IMAGE_KERNEL_DIRECTION
+{
+    VN_IMAGE_KERNEL_1D_HORIZONTAL = 0x0,     // single 1D pass in the horizontal direction
+    VN_IMAGE_KERNEL_1D_VERTICAL,             // single 1D pass in the vertical direction
+    VN_IMAGE_KERNEL_2D_COMBINED,             // one combined 2D pass over the image surface
+
+    // ...
+
+    // Sentinel; pins the enum's underlying storage size.
+    VN_IMAGE_KERNEL_DIRECTION_WORD = 0x7FFF
+};
+
+//
+// Kernel Edgemode
+//
+// When sampling the edge of an image, the kernel must decide how best to handle cases
+// where coordinates seek beyond the addressable pixel range. Edgemode allows the
+// caller to specify whether the kernel should clamp the coordinates or wrap them
+// around to the other side of the image.
+//
+
+enum VN_IMAGE_KERNEL_EDGEMODE
+{
+    VN_IMAGE_KERNEL_EDGE_WRAP = 0x0,     // wrap out-of-range coordinates to the opposite edge
+    VN_IMAGE_KERNEL_EDGE_CLAMP,          // clamp out-of-range coordinates to the image border
+    VN_IMAGE_KERNEL_EDGE_REFLECT,        // presumably mirrors coordinates at the border -- confirm in kernel sources
+
+    // ...
+
+    // Sentinel; pins the enum's underlying storage size.
+    VN_IMAGE_KERNEL_EDGEMODE_WORD = 0x7FFF
+};
+
+//
+// Simple helper utility to check if a kernel is separable.
+//
+
+inline BOOL vnIsSeparableKernel( VN_IMAGE_KERNEL_TYPE uiKernel )
+{
+    // A kernel is separable when bit 31 of its identifier is set -- the flag packed in
+    // by VN_IMAGE_MAKE_KERNEL.
+    return ( uiKernel & 0x80000000 ) != 0;
+}
+
+//
+// SampleImage
+//
+// This function samples a source image at a particular location, using the prescribed filter
+// kernel, and returns the result in the same format as the source image.
+//
+// Parameters:
+//
+// pSrcImage: The read-only source image to sample.
+//
+// uiKernel: Indicates the filter kernel to use when sampling.
+//
+// uiDirection: Indicates whether the kernel should be run horizontally or vertically
+// over a 1D space, or whether the kernel should be run over
+// the 2D surface of the image.
+//
+// uiX: The x coordinate of the pixel to sample from the source image.
+//
+// uiY: The y coordinate of the pixel to sample from the source image.
+//
+// uiRadius: The radius to supply for the filter kernel.
+//
+// pRawOutput: A pointer to a pixel in a destination buffer that should receive
+// sampled output. In most cases, this will refer to a pixel in a
+// destination image (of matching format to the source image), that
+// should store the sampled result.
+//
+// Supported Image Formats:
+//
+// All image formats are supported.
+//
+
+VN_STATUS vnSampleImage( CONST CVImage & pSrcImage,
+ VN_IMAGE_KERNEL_TYPE uiKernel,
+ VN_IMAGE_KERNEL_DIRECTION uiDirection,
+ FLOAT32 fX,
+ FLOAT32 fY,
+ FLOAT32 fRadius,
+ UINT8 * pRawOutput );
+
+#endif // __VN_IMAGE_SAMPLE_H__
diff --git a/src/libs/image-resampler/vnImagine.h b/src/libs/image-resampler/vnImagine.h
new file mode 100644
index 000000000..1277f8b2c
--- /dev/null
+++ b/src/libs/image-resampler/vnImagine.h
@@ -0,0 +1,53 @@
+
+//
+// Copyright (c) 2002-2009 Joe Bertolami. All Right Reserved.
+//
+// vnImagine.h
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Description:
+//
+// Imagine ("Image Engine") is a 256 bit image processing library that provides
+// optimized routines for a very large assortment of operations. This package
+// contains an early reference version of the resampling routines from the library.
+//
+// This source code is provided for educational purposes and lacks significant
+// optimizations that are available in the full version of Imagine. To download
+// the complete version visit the website below.
+//
+// Additional Information:
+//
+// For more information, visit http://www.bertolami.com.
+//
+
+#ifndef __VN_IMAGINE_H__
+#define __VN_IMAGINE_H__
+
+#include "Base/vnBase.h"
+#include "Base/vnImage.h"
+#include "Base/vnImageFormat.h"
+
+#include "Operators/vnImageResize.h"
+#include "Operators/vnImageScale.h"
+
+#endif // __VN_IMAGINE_H__