diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8e08415..715e727 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,6 @@
+##### 1.2.4:
+ Fixed a regression with relative file paths.
+
##### 1.2.3:
Fixed FFSAR_NUM, FFSAR_DEN, FFSAR.
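
The 1.2.4 fix noted above concerns how file names listed inside a D2V project are resolved when they are relative; the change itself is in CMPEG2Decoder::create_file_lists further down. A minimal sketch of the intended behaviour, assuming std::filesystem and placeholder names (resolve_d2v_entry, d2v_path, entry) that are not taken from the plugin:

    // Minimal sketch (assumed names, not the plugin's exact code): resolve one
    // relative entry from a D2V file list against the directory of the D2V file.
    #include <filesystem>
    #include <string>

    static std::string resolve_d2v_entry(const std::filesystem::path& d2v_path,
                                         const std::filesystem::path& entry)
    {
        namespace fs = std::filesystem;
        if (!entry.is_relative())
            return entry.generic_string();   // absolute entries pass through unchanged
        // parent_path() is empty when the D2V was opened by a bare file name; joining
        // with operator/ then keeps the entry relative to the current directory
        // instead of turning it into "/entry".
        const fs::path base = d2v_path.parent_path();
        return fs::canonical(base / entry).generic_string();
    }

The point of the 1.2.4 change is exactly this case: a path separator is only inserted when the D2V path actually has a parent directory, so entries next to a D2V given as a bare file name still resolve.
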
diff --git a/msvc/D2VSource.vcxproj b/msvc/D2VSource.vcxproj
index ef11e66..2573cfa 100644
--- a/msvc/D2VSource.vcxproj
+++ b/msvc/D2VSource.vcxproj
@@ -101,11 +101,11 @@
AnySuitable
true
Speed
- true
true
true
Precise
stdcpp17
+ true
MachineX86
@@ -113,6 +113,7 @@
Windows
true
true
+ UseLinkTimeCodeGeneration
@@ -121,16 +122,18 @@
AnySuitable
true
Speed
- true
true
true
stdcpp17
+ true
+ Precise
true
true
+ UseLinkTimeCodeGeneration
diff --git a/src/AVISynthAPI.cpp b/src/AVISynthAPI.cpp
index cbe8790..5dcd0e3 100644
--- a/src/AVISynthAPI.cpp
+++ b/src/AVISynthAPI.cpp
@@ -26,13 +26,13 @@
*
*/
-#include
-#include
-#include
#include
-#include
+#include
+#include
#include
#include
+#include
+#include
#include
#include "AVISynthAPI.h"
@@ -48,7 +48,7 @@
#define VERSION "D2VSource 1.2.3"
-bool PutHintingData(uint8_t *video, uint32_t hint)
+bool PutHintingData(uint8_t* video, uint32_t hint)
{
constexpr uint32_t MAGIC_NUMBER = 0xdeadbeef;
@@ -86,7 +86,7 @@ static void show_info(int n, CMPEG2Decoder& d, PVideoFrame& frame,
uint32_t gop = 0;
do {
if (raw >= d.GOPList[gop].number)
- if (raw < d.GOPList[static_cast(gop)+1].number)
+ if (raw < d.GOPList[static_cast(gop) + 1].number)
break;
} while (++gop < d.GOPList.size() - 1);
@@ -109,7 +109,7 @@ static void show_info(int n, CMPEG2Decoder& d, PVideoFrame& frame,
if (d.info == 1) {
char msg1[1024];
- sprintf_s(msg1,"%s\n"
+ sprintf_s(msg1, "%s\n"
"---------------------------------------\n"
"Source: %s\n"
"Frame Rate: %3.6f fps (%u/%u) %s\n"
@@ -150,7 +150,8 @@ static void show_info(int n, CMPEG2Decoder& d, PVideoFrame& frame,
d.avgquant, d.minquant, d.maxquant);
env->ApplyMessage(&frame, vi, msg1, 150, 0xdfffbf, 0x0, 0x0);
- } else if (d.info == 2) {
+ }
+ else if (d.info == 2) {
dprintf(const_cast<char*>("MPEG2DecPlus: %s\n"), VERSION);
dprintf(const_cast<char*>("MPEG2DecPlus: Source: %s\n"), d.Infilename[rgop.file].c_str());
dprintf(const_cast<char*>("MPEG2DecPlus: Frame Rate: %3.6f fps (%u/%u) %s\n"),
@@ -170,7 +171,8 @@ static void show_info(int n, CMPEG2Decoder& d, PVideoFrame& frame,
dprintf(const_cast<char*>("MPEG2DecPlus: Colorimetry: %s (%d)\n"), matrix[rgop.matrix], rgop.matrix);
dprintf(const_cast<char*>("MPEG2DecPlus: Quants: %d/%d/%d (avg/min/max)\n"), d.avgquant, d.minquant, d.maxquant);
- } else if (d.info == 3) {
+ }
+ else if (d.info == 3) {
constexpr uint32_t PROGRESSIVE = 0x00000001;
constexpr int COLORIMETRY_SHIFT = 2;
@@ -320,16 +322,16 @@ PVideoFrame __stdcall D2VSource::GetFrame(int n, IScriptEnvironment* env)
d.Decode(n, out);
- if (luminanceFlag )
+ if (luminanceFlag)
luminance_filter(out.y, out.ywidth, out.yheight, out.ypitch, luminanceTable);
if (d.upConv == 2) { // convert 4:2:2 (planar) to 4:4:4 (planar)
env->BitBlt(frame->GetWritePtr(PLANAR_Y), frame->GetPitch(PLANAR_Y),
- bufY, out.ypitch, vi.width, vi.height);
+ bufY, out.ypitch, vi.width, vi.height);
conv422to444(out.u, frame->GetWritePtr(PLANAR_U), out.uvpitch,
- frame->GetPitch(PLANAR_U), vi.width, vi.height);
+ frame->GetPitch(PLANAR_U), vi.width, vi.height);
conv422to444(out.v, frame->GetWritePtr(PLANAR_V), out.uvpitch,
- frame->GetPitch(PLANAR_V), vi.width, vi.height);
+ frame->GetPitch(PLANAR_V), vi.width, vi.height);
}
if (d.info != 0)
@@ -393,7 +395,7 @@ PVideoFrame __stdcall D2VSource::GetFrame(int n, IScriptEnvironment* env)
static void set_user_default(FILE* def, char* d2v, int& idct, bool& showq,
- int& info, int upcnv, bool& i420, int& icc)
+ int& info, int upcnv, bool& i420, int& icc)
{
char buf[512];
auto load_str = [&buf](char* var, const char* name, int len) {
@@ -424,7 +426,7 @@ static void set_user_default(FILE* def, char* d2v, int& idct, bool& showq,
}
};
- while(fgets(buf, 511, def) != 0) {
+ while (fgets(buf, 511, def) != 0) {
load_str(d2v, "d2v=%s", 4);
load_int(idct, "idct=%d", 5);
load_bool(showq, "showQ=%s", 6);
@@ -462,17 +464,17 @@ AVSValue __cdecl D2VSource::create(AVSValue args, void*, IScriptEnvironment* env
// check for uninitialised strings
if (strlen(d2v) >= _MAX_PATH) d2v[0] = 0;
- D2VSource *dec = new D2VSource( args[0].AsString(d2v),
- args[1].AsInt(idct),
- args[2].AsBool(showQ),
- args[3].AsInt(info),
- args[4].AsInt(upConv),
- args[5].AsBool(i420),
- iCC,
- env );
+ D2VSource* dec = new D2VSource(args[0].AsString(d2v),
+ args[1].AsInt(idct),
+ args[2].AsBool(showQ),
+ args[3].AsInt(info),
+ args[4].AsInt(upConv),
+ args[5].AsBool(i420),
+ iCC,
+ env);
// Only bother invoking crop if we have to.
auto& d = *dec->decoder;
- if (d.Clip_Top || d.Clip_Bottom || d.Clip_Left || d.Clip_Right ||
+ if (d.Clip_Top || d.Clip_Bottom || d.Clip_Left || d.Clip_Right ||
// This is cheap but it works. The intent is to allow the
// display size to be different from the encoded size, while
// not requiring massive revisions to the code. So we detect the
@@ -504,7 +506,7 @@ AVSValue __cdecl D2VSource::create(AVSValue args, void*, IScriptEnvironment* env
const AVS_Linkage* AVS_linkage = nullptr;
extern "C" __declspec(dllexport) const char* __stdcall
-AvisynthPluginInit3(IScriptEnvironment* env, const AVS_Linkage* const vectors)
+AvisynthPluginInit3(IScriptEnvironment * env, const AVS_Linkage* const vectors)
{
AVS_linkage = vectors;
diff --git a/src/AVISynthAPI.h b/src/AVISynthAPI.h
index 152f6fe..48fddf2 100644
--- a/src/AVISynthAPI.h
+++ b/src/AVISynthAPI.h
@@ -32,24 +32,24 @@
#include "MPEG2Decoder.h"
-class D2VSource: public IClip {
+class D2VSource : public IClip {
VideoInfo vi;
//int _PP_MODE;
- uint8_t *bufY, *bufU, *bufV; // for 4:2:2 input support
+ uint8_t* bufY, * bufU, * bufV; // for 4:2:2 input support
CMPEG2Decoder* decoder;
bool luminanceFlag;
uint8_t luminanceTable[256];
bool has_at_least_v8;
public:
- D2VSource(const char* d2v, int idct, bool showQ, int _info, int _upConv, bool _i420, int iCC, IScriptEnvironment* env);
- ~D2VSource() {}
- PVideoFrame __stdcall GetFrame(int n, IScriptEnvironment* env);
- bool __stdcall GetParity(int n);
- void __stdcall GetAudio(void* buf, int64_t start, int64_t count, IScriptEnvironment* env) {};
- const VideoInfo& __stdcall GetVideoInfo() { return vi; }
- int __stdcall SetCacheHints(int hints, int) { return hints == CACHE_GET_MTMODE ? MT_SERIALIZED : 0; };
- static AVSValue __cdecl create(AVSValue args, void*, IScriptEnvironment* env);
+ D2VSource(const char* d2v, int idct, bool showQ, int _info, int _upConv, bool _i420, int iCC, IScriptEnvironment* env);
+ ~D2VSource() {}
+ PVideoFrame __stdcall GetFrame(int n, IScriptEnvironment* env);
+ bool __stdcall GetParity(int n);
+ void __stdcall GetAudio(void* buf, int64_t start, int64_t count, IScriptEnvironment* env) {};
+ const VideoInfo& __stdcall GetVideoInfo() { return vi; }
+ int __stdcall SetCacheHints(int hints, int) { return hints == CACHE_GET_MTMODE ? MT_SERIALIZED : 0; };
+ static AVSValue __cdecl create(AVSValue args, void*, IScriptEnvironment* env);
};
#endif
diff --git a/src/MPEG2Decoder.cpp b/src/MPEG2Decoder.cpp
index 7ef49be..0dd8f61 100644
--- a/src/MPEG2Decoder.cpp
+++ b/src/MPEG2Decoder.cpp
@@ -20,19 +20,19 @@
*
*/
-/*
- lots of code modified for YV12 / MPEG2Dec3 - MarcFD
-*/
+ /*
+ lots of code modified for YV12 / MPEG2Dec3 - MarcFD
+ */
#include
-#include
-#include
#include
+#include
+#include
#include "MPEG2Decoder.h"
+#include "idct.h"
#include "mc.h"
#include "misc.h"
-#include "idct.h"
static constexpr int ChromaFormat[4] = {
@@ -47,8 +47,8 @@ static void validate(bool cond, const char* msg)
// Open function modified by Donald Graft as part of fix for dropped frames and random frame access.
// change function to constructor - chikuzen.
CMPEG2Decoder::CMPEG2Decoder(FILE* d2vf, const char* path, int _idct, int icc, int upconv, int _info, bool showq, bool _i420, int _cpu_flags) :
- Rdmax(nullptr),
- CurrentBfr(0),
+ Rdmax(nullptr),
+ CurrentBfr(0),
NextBfr(0),
BitsLeft(0),
Val(0),
@@ -93,23 +93,23 @@ CMPEG2Decoder::CMPEG2Decoder(FILE* d2vf, const char* path, int _idct, int icc, i
validate(fgets(buf, 2047, d2vf) == nullptr, "Invalid D2V file, it's empty!");
validate(strstr(buf, "DGIndexProjectFile") == nullptr,
- "The input file is not a D2V project file.");
+ "The input file is not a D2V project file.");
validate(strncmp(buf, "DGIndexProjectFile16", 20) != 0,
- "DGIndex/MPEG2DecPlus mismatch. You are picking up\n"
- "a version of MPEG2DecPlus, possibly from your plugins directory,\n"
- "that does not match the version of DGIndex used to make the D2V\n"
- "file. Search your hard disk for all copies of MPEG2DecPlus.dll\n"
- "and delete or rename all of them except for the one that\n"
- "has the same version number as the DGIndex.exe that was used\n"
- "to make the D2V file.");
+ "DGIndex/MPEG2DecPlus mismatch. You are picking up\n"
+ "a version of MPEG2DecPlus, possibly from your plugins directory,\n"
+ "that does not match the version of DGIndex used to make the D2V\n"
+ "file. Search your hard disk for all copies of MPEG2DecPlus.dll\n"
+ "and delete or rename all of them except for the one that\n"
+ "has the same version number as the DGIndex.exe that was used\n"
+ "to make the D2V file.");
create_file_lists(d2vf, path, buf);
fscanf_s(d2vf, "\nStream_Type=%d\n", &SystemStream_Flag);
if (SystemStream_Flag == 2) {
fscanf_s(d2vf, "MPEG2_Transport_PID=%x,%x,%x\n",
- &MPEG2_Transport_VideoPID, &MPEG2_Transport_AudioPID, &MPEG2_Transport_PCRPID);
+ &MPEG2_Transport_VideoPID, &MPEG2_Transport_AudioPID, &MPEG2_Transport_PCRPID);
fscanf_s(d2vf, "Transport_Packet_Size=%d\n", &TransportPacketSize);
}
fscanf_s(d2vf, "MPEG_Type=%d\n", &mpeg_type);
@@ -150,15 +150,18 @@ void CMPEG2Decoder::setIDCT(int idct)
if (idct == IDCT_REF) {
prefetchTables = prefetch_ref;
idctFunction = idct_ref_sse3;
- } else if (idct == IDCT_LLM_FLOAT) {
+ }
+ else if (idct == IDCT_LLM_FLOAT) {
if (!!(cpu_flags & CPUF_AVX2)) {
prefetchTables = prefetch_llm_float_avx2;
idctFunction = idct_llm_float_avx2;
- } else {
+ }
+ else {
prefetchTables = prefetch_llm_float_sse2;
idctFunction = idct_llm_float_sse2;
}
- } else {
+ }
+ else {
prefetchTables = prefetch_ap922;
idctFunction = idct_ap922_sse2;
}
@@ -173,13 +176,13 @@ void CMPEG2Decoder::set_clip_properties()
do {
validate(Fault_Flag == OUT_OF_BITS,
- "Could not find a sequence header in the input stream.");
+ "Could not find a sequence header in the input stream.");
Next_Start_Code();
} while (Get_Bits(32) != SEQUENCE_HEADER_CODE);
Sequence_Header();
validate(chroma_format == CHROMA444,
- "currently unsupported input color format (4:4:4)");
+ "currently unsupported input color format (4:4:4)");
mb_width = (horizontal_size + 15) / 16;
mb_height = progressive_sequence ? (vertical_size + 15) / 16 : 2 * ((vertical_size + 31) / 32);
@@ -205,7 +208,7 @@ void CMPEG2Decoder::create_file_lists(FILE* d2vf, const char* path, char* buf)
while (file_limit-- > 0) {
fgets(buf, 2047, d2vf);
- auto temp = std::string(buf);
+ std::string temp{ std::string(buf) };
temp.pop_back(); // Strip newline.
if (temp.find('\r') != std::string::npos) {
@@ -215,23 +218,23 @@ void CMPEG2Decoder::create_file_lists(FILE* d2vf, const char* path, char* buf)
const std::filesystem::path p(temp);
if (p.is_relative()) {
- std::filesystem::path cur_dir = std::filesystem::path(path).parent_path().generic_string() + "/" + p.generic_string();
- const std::string d2v_stem = std::filesystem::canonical(cur_dir).generic_string();
+ const std::string d2v_stem{ std::filesystem::canonical(std::filesystem::path(path).parent_path().generic_string() + ((std::filesystem::path(path).has_parent_path()) ? "/" : "") + p.generic_string()).generic_string() };
#ifdef _WIN32
int in;
_sopen_s(&in, d2v_stem.c_str(), _O_RDONLY | _O_BINARY, _SH_DENYWR, 0);
#else
- int in = open(d2v_stem.c_str(), O_RDONLY);
+ int in{ open(d2v_stem.c_str(), O_RDONLY) };
#endif
validate(in == -1, "Could not open one of the input files.");
Infile.emplace_back(in);
Infilename.emplace_back(d2v_stem);
- } else {
+ }
+ else {
#ifdef _WIN32
int in;
_sopen_s(&in, temp.c_str(), _O_RDONLY | _O_BINARY, _SH_DENYWR, 0);
#else
- int in = open(temp.c_str(), O_RDONLY);
+ int in{ open(temp.c_str(), O_RDONLY) };
#endif
validate(in == -1, "Could not open one of the input files.");
Infile.emplace_back(in);
@@ -246,17 +249,17 @@ void CMPEG2Decoder::create_gop_and_frame_lists(FILE* d2vf, char* buf)
int repeat_on, repeat_off, repeat_init;
ntsc = film = top = bottom = mapping = repeat_on = repeat_off = repeat_init = 0;
HaveRFFs = false;
-
+
// These start with sizes of 1000000 (the old fixed default). If it
// turns out that that is too small, the memory spaces are enlarged
// 500000 at a time using realloc. -- tritical May 16, 2005
DirectAccess.reserve(1000000);
FrameList.resize(1000000);
GOPList.reserve(200000);
-
+
fgets(buf, 2047, d2vf);
char* buf_p = buf;
-
+
while (true) {
sscanf_s(buf_p, "%x", &type);
if (type == 0xff)
@@ -276,7 +279,7 @@ void CMPEG2Decoder::create_gop_and_frame_lists(FILE* d2vf, char* buf)
while (*buf_p++ != ' ');
while (*buf_p++ != ' ');
GOPList.emplace_back(film, matrix, file, position, ic, type);
-
+
sscanf_s(buf_p, "%x", &type);
}
tff = (type & 0x2) >> 1;
@@ -285,14 +288,14 @@ void CMPEG2Decoder::create_gop_and_frame_lists(FILE* d2vf, char* buf)
else
rff = type & 0x1;
if (FO_Flag != FO_FILM && FO_Flag != FO_RAW && rff) HaveRFFs = true;
-
+
if (!film) {
if (tff)
Field_Order = 1;
else
Field_Order = 0;
}
-
+
size_t listsize = FrameList.size() - 2;
if (mapping >= listsize || ntsc >= listsize || film >= listsize) {
FrameList.resize(listsize + 500002);
@@ -306,26 +309,26 @@ void CMPEG2Decoder::create_gop_and_frame_lists(FILE* d2vf, char* buf)
if (repeat_init)
{
- if (repeat_off-repeat_on == 5)
+ if (repeat_off - repeat_on == 5)
{
repeat_on = repeat_off = 0;
}
else
{
FrameList[mapping].top = FrameList[mapping].bottom = film;
- mapping ++;
+ mapping++;
}
-
- if (repeat_on-repeat_off == 5)
+
+ if (repeat_on - repeat_off == 5)
{
repeat_on = repeat_off = 0;
FrameList[mapping].top = FrameList[mapping].bottom = film;
- mapping ++;
+ mapping++;
}
}
else
{
- if (repeat_off-repeat_on == 3)
+ if (repeat_off - repeat_on == 3)
{
repeat_on = repeat_off = 0;
repeat_init = 1;
@@ -333,15 +336,15 @@ void CMPEG2Decoder::create_gop_and_frame_lists(FILE* d2vf, char* buf)
else
{
FrameList[mapping].top = FrameList[mapping].bottom = film;
- mapping ++;
+ mapping++;
}
- if (repeat_on-repeat_off == 3)
+ if (repeat_on - repeat_off == 3)
{
repeat_on = repeat_off = 0;
repeat_init = 1;
FrameList[mapping].top = FrameList[mapping].bottom = film;
- mapping ++;
+ mapping++;
}
}
}
@@ -397,7 +400,7 @@ void CMPEG2Decoder::create_gop_and_frame_lists(FILE* d2vf, char* buf)
FrameList[ntsc].bottom = film;
bottom = 1;
}
-
+
if (top && bottom)
{
top = bottom = 0;
@@ -429,12 +432,13 @@ void CMPEG2Decoder::create_gop_and_frame_lists(FILE* d2vf, char* buf)
GOPList.shrink_to_fit();
DirectAccess.shrink_to_fit();
- if (FO_Flag==FO_FILM) {
- while (FrameList[mapping-1].top >= film)
+ if (FO_Flag == FO_FILM) {
+ while (FrameList[mapping - 1].top >= film)
--mapping;
FrameList.resize(mapping);
- } else {
- while ((FrameList[ntsc-1].top >= film) || (FrameList[ntsc-1].bottom >= film))
+ }
+ else {
+ while ((FrameList[ntsc - 1].top >= film) || (FrameList[ntsc - 1].bottom >= film))
--ntsc;
FrameList.resize(ntsc);
}
@@ -472,9 +476,9 @@ void CMPEG2Decoder::destroy()
void CMPEG2Decoder::allocate_buffers()
{
- QP = (int*)_aligned_malloc(sizeof(int)*mb_width*mb_height, 32);
- backwardQP = (int*)_aligned_malloc(sizeof(int)*mb_width*mb_height, 32);
- auxQP = (int*)_aligned_malloc(sizeof(int)*mb_width*mb_height, 32);
+ QP = (int*)_aligned_malloc(sizeof(int) * mb_width * mb_height, 32);
+ backwardQP = (int*)_aligned_malloc(sizeof(int) * mb_width * mb_height, 32);
+ auxQP = (int*)_aligned_malloc(sizeof(int) * mb_width * mb_height, 32);
if (!QP || !backwardQP || !auxQP) {
destroy();
throw std::runtime_error("failed to allocate QP buffers.");
@@ -513,7 +517,8 @@ void CMPEG2Decoder::allocate_buffers()
try {
auxFrame1 = new YV12PICT(Coded_Picture_Height, Coded_Picture_Width, cf);
auxFrame2 = new YV12PICT(Coded_Picture_Height, Coded_Picture_Width, cf);
- } catch (std::runtime_error& e) {
+ }
+ catch (std::runtime_error& e) {
destroy();
throw e;
}
@@ -796,8 +801,8 @@ void CMPEG2Decoder::Decode(uint32_t frame, YV12PICT& dst)
__forceinline void CMPEG2Decoder::copy_all(YV12PICT& src, YV12PICT& dst)
{
- int t = (upConv > 0 && chroma_format == 1) ? Chroma_Height*2 : Chroma_Height;
- fast_copy(src.y, src.ypitch, dst.y, dst.ypitch, dst.ywidth,Coded_Picture_Height);
+ int t = (upConv > 0 && chroma_format == 1) ? Chroma_Height * 2 : Chroma_Height;
+ fast_copy(src.y, src.ypitch, dst.y, dst.ypitch, dst.ywidth, Coded_Picture_Height);
fast_copy(src.u, src.uvpitch, dst.u, dst.uvpitch, dst.uvwidth, t);
fast_copy(src.v, src.uvpitch, dst.v, dst.uvpitch, dst.uvwidth, t);
}
@@ -834,11 +839,11 @@ copy_oddeven(const uint8_t* odd, const size_t opitch, const uint8_t* even,
} while (--height != 0);
}
-void CMPEG2Decoder::CopyTopBot(YV12PICT *odd, YV12PICT *even, YV12PICT *dst)
+void CMPEG2Decoder::CopyTopBot(YV12PICT* odd, YV12PICT* even, YV12PICT* dst)
{
- int tChroma_Height = (upConv > 0 && chroma_format == 1) ? Chroma_Height*2 : Chroma_Height;
- copy_oddeven(odd->y,odd->ypitch*2,even->y+even->ypitch,even->ypitch*2,dst->y,dst->ypitch,dst->ywidth,Coded_Picture_Height>>1);
- copy_oddeven(odd->u,odd->uvpitch*2,even->u+even->uvpitch,even->uvpitch*2,dst->u,dst->uvpitch,dst->uvwidth,tChroma_Height>>1);
- copy_oddeven(odd->v,odd->uvpitch*2,even->v+even->uvpitch,even->uvpitch*2,dst->v,dst->uvpitch,dst->uvwidth,tChroma_Height>>1);
+ int tChroma_Height = (upConv > 0 && chroma_format == 1) ? Chroma_Height * 2 : Chroma_Height;
+ copy_oddeven(odd->y, odd->ypitch * 2, even->y + even->ypitch, even->ypitch * 2, dst->y, dst->ypitch, dst->ywidth, Coded_Picture_Height >> 1);
+ copy_oddeven(odd->u, odd->uvpitch * 2, even->u + even->uvpitch, even->uvpitch * 2, dst->u, dst->uvpitch, dst->uvwidth, tChroma_Height >> 1);
+ copy_oddeven(odd->v, odd->uvpitch * 2, even->v + even->uvpitch, even->uvpitch * 2, dst->v, dst->uvpitch, dst->uvwidth, tChroma_Height >> 1);
}
#endif
diff --git a/src/MPEG2Decoder.h b/src/MPEG2Decoder.h
index 5e68364..6b8d2dc 100644
--- a/src/MPEG2Decoder.h
+++ b/src/MPEG2Decoder.h
@@ -1,12 +1,12 @@
#ifndef MPEG2DECODER_H
#define MPEG2DECODER_H
+#include
#include
#include
-#include
+#include
#include
#include
-#include
#include "yv12pict.h"
#ifndef _WIN32
@@ -18,34 +18,34 @@
/* code definition */
enum {
- PICTURE_START_CODE = 0x100,
- SLICE_START_CODE_MIN = 0x101,
- SLICE_START_CODE_MAX = 0x1AF,
- USER_DATA_START_CODE = 0x1B2,
- SEQUENCE_HEADER_CODE = 0x1B3,
- EXTENSION_START_CODE = 0x1B5,
- SEQUENCE_END_CODE = 0x1B7,
- GROUP_START_CODE = 0x1B8,
-
- SYSTEM_END_CODE = 0x1B9,
- PACK_START_CODE = 0x1BA,
- SYSTEM_START_CODE = 0x1BB,
- PRIVATE_STREAM_1 = 0x1BD,
+ PICTURE_START_CODE = 0x100,
+ SLICE_START_CODE_MIN = 0x101,
+ SLICE_START_CODE_MAX = 0x1AF,
+ USER_DATA_START_CODE = 0x1B2,
+ SEQUENCE_HEADER_CODE = 0x1B3,
+ EXTENSION_START_CODE = 0x1B5,
+ SEQUENCE_END_CODE = 0x1B7,
+ GROUP_START_CODE = 0x1B8,
+
+ SYSTEM_END_CODE = 0x1B9,
+ PACK_START_CODE = 0x1BA,
+ SYSTEM_START_CODE = 0x1BB,
+ PRIVATE_STREAM_1 = 0x1BD,
VIDEO_ELEMENTARY_STREAM = 0x1E0,
};
/* extension start code IDs */
enum {
- SEQUENCE_EXTENSION_ID = 1,
+ SEQUENCE_EXTENSION_ID = 1,
SEQUENCE_DISPLAY_EXTENSION_ID = 2,
- QUANT_MATRIX_EXTENSION_ID = 3,
- COPYRIGHT_EXTENSION_ID = 4,
- PICTURE_DISPLAY_EXTENSION_ID = 7,
- PICTURE_CODING_EXTENSION_ID = 8,
+ QUANT_MATRIX_EXTENSION_ID = 3,
+ COPYRIGHT_EXTENSION_ID = 4,
+ PICTURE_DISPLAY_EXTENSION_ID = 7,
+ PICTURE_CODING_EXTENSION_ID = 8,
};
enum {
- ZIG_ZAG = 0,
+ ZIG_ZAG = 0,
MB_WEIGHT = 32,
MB_CLASS4 = 64,
};
@@ -58,16 +58,16 @@ enum {
};
enum {
- TOP_FIELD = 1,
- BOTTOM_FIELD = 2,
+ TOP_FIELD = 1,
+ BOTTOM_FIELD = 2,
FRAME_PICTURE = 3,
};
enum {
MC_FIELD = 1,
MC_FRAME = 2,
- MC_16X8 = 2,
- MC_DMV = 3,
+ MC_16X8 = 2,
+ MC_DMV = 3,
};
enum {
@@ -85,16 +85,16 @@ enum {
enum {
- IDCT_AUTO = 0,
+ IDCT_AUTO = 0,
IDCT_AP922_INT = 3,
IDCT_LLM_FLOAT = 4,
- IDCT_REF = 5,
+ IDCT_REF = 5,
};
enum {
FO_NONE = 0,
FO_FILM = 1,
- FO_RAW = 2,
+ FO_RAW = 2,
};
// Fault_Flag values
@@ -153,9 +153,9 @@ class CMPEG2Decoder
void Next_Start_Code(void);
std::vector ReadBuffer;
- uint8_t *Rdbfr, *Rdptr, *Rdmax;
+ uint8_t* Rdbfr, * Rdptr, * Rdmax;
uint32_t CurrentBfr, NextBfr, BitsLeft, Val, Read;
- uint8_t *buffer_invalid;
+ uint8_t* buffer_invalid;
// gethdr.cpp
int Get_Hdr(void);
@@ -205,24 +205,24 @@ class CMPEG2Decoder
void form_predictions(int bx, int by, int macroblock_type, int motion_type,
int PMV[2][2][2], int motion_vertical_field_select[2][2], int dmvector[2]);
- void form_prediction(uint8_t *src[], int sfield, uint8_t *dst[], int dfield,
+ void form_prediction(uint8_t* src[], int sfield, uint8_t* dst[], int dfield,
int lx, int lx2, int w, int h, int x, int y, int dx, int dy, int average_flag);
// motion.cpp
void motion_vectors(int PMV[2][2][2], int dmvector[2], int motion_vertical_field_select[2][2],
int s, int motion_vector_count, int mv_format,
int h_r_size, int v_r_size, int dmv, int mvscale);
- void Dual_Prime_Arithmetic(int DMV[][2], int *dmvector, int mvx, int mvy);
+ void Dual_Prime_Arithmetic(int DMV[][2], int* dmvector, int mvx, int mvy);
- void motion_vector(int *PMV, int *dmvector, int h_r_size, int v_r_size,
+ void motion_vector(int* PMV, int* dmvector, int h_r_size, int v_r_size,
int dmv, int mvscale, int full_pel_vector);
- void decode_motion_vector(int *pred, int r_size, int motion_code,
+ void decode_motion_vector(int* pred, int r_size, int motion_code,
int motion_residualesidual, int full_pel_vector);
int Get_motion_code(void);
int Get_dmvector(void);
// store.cpp
- void assembleFrame(uint8_t *src[], int pf, YV12PICT& dst);
+ void assembleFrame(uint8_t* src[], int pf, YV12PICT& dst);
// decoder operation control flags
int Fault_Flag;
@@ -256,17 +256,17 @@ class CMPEG2Decoder
int alternate_scan;
int quantizer_scale;
- short *block[8], *p_block[8];
+ short* block[8], * p_block[8];
int pf_backward, pf_forward, pf_current;
// global values
- uint8_t *backward_reference_frame[3], *forward_reference_frame[3];
- uint8_t *auxframe[3], *current_frame[3];
+ uint8_t* backward_reference_frame[3], * forward_reference_frame[3];
+ uint8_t* auxframe[3], * current_frame[3];
//uint8_t *u422, *v422;
- YV12PICT *auxFrame1;
- YV12PICT *auxFrame2;
- YV12PICT *saved_active;
- YV12PICT *saved_store;
+ YV12PICT* auxFrame1;
+ YV12PICT* auxFrame2;
+ YV12PICT* saved_active;
+ YV12PICT* saved_store;
enum {
ELEMENTARY_STREAM = 0,
@@ -312,7 +312,7 @@ class CMPEG2Decoder
void copy_top(YV12PICT& src, YV12PICT& dst);
void copy_bottom(YV12PICT& src, YV12PICT& dst);
- int *QP, *backwardQP, *auxQP;
+ int* QP, * backwardQP, * auxQP;
uint32_t prev_frame;
std::vector DirectAccess;
@@ -376,7 +376,8 @@ __forceinline uint32_t CMPEG2Decoder::Show_Bits(uint32_t N)
{
if (N <= BitsLeft) {
return (CurrentBfr << (32 - BitsLeft)) >> (32 - N);;
- } else {
+ }
+ else {
N -= BitsLeft;
int shift = 32 - BitsLeft;
//return (((CurrentBfr << shift) >> shift) << N) + (NextBfr >> (32 - N));;
@@ -390,7 +391,8 @@ __forceinline uint32_t CMPEG2Decoder::Get_Bits(uint32_t N)
Val = (CurrentBfr << (32 - BitsLeft)) >> (32 - N);
BitsLeft -= N;
return Val;
- } else {
+ }
+ else {
N -= BitsLeft;
int shift = 32 - BitsLeft;
Val = (CurrentBfr << shift) >> shift;
@@ -408,7 +410,8 @@ __forceinline void CMPEG2Decoder::Flush_Buffer(uint32_t N)
{
if (N < BitsLeft) {
BitsLeft -= N;
- } else {
+ }
+ else {
CurrentBfr = NextBfr;
BitsLeft += 32 - N;
Fill_Next();
@@ -434,35 +437,37 @@ __forceinline void CMPEG2Decoder::Fill_Next()
if (Rdptr >= Rdmax)
Next_Packet();
NextBfr |= Get_Byte();
- } else if (Rdptr < Rdbfr + BUFFER_SIZE - 3) {
+ }
+ else if (Rdptr < Rdbfr + BUFFER_SIZE - 3) {
//NextBfr = (*Rdptr << 24) + (*(Rdptr+1) << 16) + (*(Rdptr+2) << 8) + *(Rdptr+3);
NextBfr = _byteswap_ulong(*reinterpret_cast(Rdptr));
Rdptr += 4;
- } else {
+ }
+ else {
switch (Rdbfr + BUFFER_SIZE - Rdptr) {
- case 1:
- NextBfr = *Rdptr++ << 24;
- Fill_Buffer();
- NextBfr |= (Rdptr[0] << 16) | (Rdptr[1] << 8) | Rdptr[2];
- Rdptr += 3;
- break;
- case 2:
- NextBfr = (Rdptr[0] << 24) | (Rdptr[1] << 16);
- Rdptr += 2;
- Fill_Buffer();
- NextBfr |= (Rdptr[0] << 8) | Rdptr[1];
- Rdptr += 2;
- break;
- case 3:
- NextBfr = (Rdptr[0] << 24) | (Rdptr[1] << 16) | (Rdptr[2] << 8);
- Rdptr += 3;
- Fill_Buffer();
- NextBfr |= *Rdptr++;
- break;
- default:
- Fill_Buffer();
- NextBfr = _byteswap_ulong(*reinterpret_cast(Rdptr));
- Rdptr += 4;
+ case 1:
+ NextBfr = *Rdptr++ << 24;
+ Fill_Buffer();
+ NextBfr |= (Rdptr[0] << 16) | (Rdptr[1] << 8) | Rdptr[2];
+ Rdptr += 3;
+ break;
+ case 2:
+ NextBfr = (Rdptr[0] << 24) | (Rdptr[1] << 16);
+ Rdptr += 2;
+ Fill_Buffer();
+ NextBfr |= (Rdptr[0] << 8) | Rdptr[1];
+ Rdptr += 2;
+ break;
+ case 3:
+ NextBfr = (Rdptr[0] << 24) | (Rdptr[1] << 16) | (Rdptr[2] << 8);
+ Rdptr += 3;
+ Fill_Buffer();
+ NextBfr |= *Rdptr++;
+ break;
+ default:
+ Fill_Buffer();
+ NextBfr = _byteswap_ulong(*reinterpret_cast(Rdptr));
+ Rdptr += 4;
}
}
}
@@ -498,7 +503,7 @@ __forceinline uint32_t CMPEG2Decoder::Get_Byte()
__forceinline uint32_t CMPEG2Decoder::Get_Short()
{
uint32_t i = Get_Byte();
- return (i<<8) + Get_Byte();
+ return (i << 8) + Get_Byte();
}
diff --git a/src/color_convert.cpp b/src/color_convert.cpp
index c058757..d1f327c 100644
--- a/src/color_convert.cpp
+++ b/src/color_convert.cpp
@@ -24,7 +24,7 @@ MPEG2Dec's colorspace convertions Copyright (C) Chia-chen Kuo - April 2001
#if 0
// C implementation
-void conv420to422I_c(const uint8_t *src, uint8_t *dst, int src_pitch, int dst_pitch, int width, int height)
+void conv420to422I_c(const uint8_t* src, uint8_t* dst, int src_pitch, int dst_pitch, int width, int height)
{
const uint8_t* s0 = src;
const uint8_t* s1 = src + src_pitch;
@@ -85,7 +85,7 @@ avg_weight_3_5(const __m128i& x, const __m128i& y, const __m128i& four)
}
-void conv420to422I(const uint8_t *src, uint8_t *dst, int src_pitch, int dst_pitch, int width, int height)
+void conv420to422I(const uint8_t* src, uint8_t* dst, int src_pitch, int dst_pitch, int width, int height)
{
const uint8_t* src0 = src;
const uint8_t* src1 = src + src_pitch;
@@ -143,7 +143,7 @@ void conv420to422I(const uint8_t *src, uint8_t *dst, int src_pitch, int dst_pitc
#if 0
// C implementation
-void conv420to422P_c(const uint8_t *src, uint8_t *dst, int src_pitch, int dst_pitch,
+void conv420to422P_c(const uint8_t* src, uint8_t* dst, int src_pitch, int dst_pitch,
int width, int height)
{
const uint8_t* s0 = src;
@@ -183,7 +183,7 @@ void conv420to422P_c(const uint8_t *src, uint8_t *dst, int src_pitch, int dst_pi
#endif
-void conv420to422P(const uint8_t *src, uint8_t *dst, int src_pitch, int dst_pitch,
+void conv420to422P(const uint8_t* src, uint8_t* dst, int src_pitch, int dst_pitch,
int width, int height)
{
const uint8_t* s0 = src;
@@ -247,14 +247,14 @@ void conv420to422P(const uint8_t *src, uint8_t *dst, int src_pitch, int dst_pitc
#if 0
// C implementation
-void conv422to444_c(const uint8_t *src, uint8_t *dst, int src_pitch, int dst_pitch,
+void conv422to444_c(const uint8_t* src, uint8_t* dst, int src_pitch, int dst_pitch,
int width, int height)
{
width /= 2;
for (int y = 0; y < height; ++y) {
for (int x = 0; x < width - 1; ++x) {
- dst[2 * x] = src[x];
+ dst[2 * x] = src[x];
dst[2 * x + 1] = (src[x] + src[x + 1] + 1) / 2;
}
dst[2 * width - 2] = dst[2 * width - 1] = src[width - 1];
@@ -265,7 +265,7 @@ void conv422to444_c(const uint8_t *src, uint8_t *dst, int src_pitch, int dst_pit
#endif
-void conv422to444(const uint8_t *src, uint8_t *dst, int src_pitch, int dst_pitch,
+void conv422to444(const uint8_t* src, uint8_t* dst, int src_pitch, int dst_pitch,
int width, int height)
{
const int right = width - 1;
@@ -292,12 +292,12 @@ void conv422to444(const uint8_t *src, uint8_t *dst, int src_pitch, int dst_pitch
const int64_t mmmask_0001 = 0x0001000100010001;
const int64_t mmmask_0128 = 0x0080008000800080;
-void conv444toRGB24(const uint8_t *py, const uint8_t *pu, const uint8_t *pv,
- uint8_t *dst, int src_pitchY, int src_pitchUV, int dst_pitch, int width,
+void conv444toRGB24(const uint8_t* py, const uint8_t* pu, const uint8_t* pv,
+ uint8_t* dst, int src_pitchY, int src_pitchUV, int dst_pitch, int width,
int height, int matrix, int pc_scale)
{
int64_t RGB_Offset, RGB_Scale, RGB_CBU, RGB_CRV, RGB_CGX;
- int dst_modulo = dst_pitch-(3*width);
+ int dst_modulo = dst_pitch - (3 * width);
if (pc_scale)
{
@@ -365,15 +365,15 @@ void conv444toRGB24(const uint8_t *py, const uint8_t *pu, const uint8_t *pv,
mov ecx, [pv] // ecx = pv
mov edx, [dst] // edx = dst
mov edi, width // edi = width
- xor esi, esi
+ xor esi, esi
pxor mm0, mm0
- convRGB24:
- movd mm1, [eax+esi]
- movd mm3, [ebx+esi]
+ convRGB24 :
+ movd mm1, [eax + esi]
+ movd mm3, [ebx + esi]
punpcklbw mm1, mm0
punpcklbw mm3, mm0
- movd mm5, [ecx+esi]
+ movd mm5, [ecx + esi]
punpcklbw mm5, mm0
movq mm7, [mmmask_0128]
psubw mm3, mm7
@@ -423,8 +423,8 @@ void conv444toRGB24(const uint8_t *py, const uint8_t *pu, const uint8_t *pv,
por mm3, mm5
por mm4, mm6
- movd mm5, [ebx+esi]
- movd mm6, [ecx+esi]
+ movd mm5, [ebx + esi]
+ movd mm6, [ecx + esi]
punpcklbw mm5, mm0
punpcklbw mm6, mm0
movq mm7, [mmmask_0128]
@@ -459,17 +459,17 @@ void conv444toRGB24(const uint8_t *py, const uint8_t *pu, const uint8_t *pv,
psrlq mm3, 40
psllq mm6, 16
por mm3, mm6
- movd [edx], mm1
+ movd[edx], mm1
psrld mm4, 16
psrlq mm5, 24
por mm5, mm4
- movd [edx+4], mm3
+ movd[edx + 4], mm3
add edx, 0x0c
add esi, 0x04
cmp esi, edi
- movd [edx-4], mm5
+ movd[edx - 4], mm5
jl convRGB24
@@ -477,7 +477,7 @@ void conv444toRGB24(const uint8_t *py, const uint8_t *pu, const uint8_t *pv,
add ebx, src_pitchUV
add ecx, src_pitchUV
add edx, dst_modulo
- xor esi, esi
+ xor esi, esi
dec height
jnz convRGB24
@@ -486,7 +486,7 @@ void conv444toRGB24(const uint8_t *py, const uint8_t *pu, const uint8_t *pv,
}
-void conv422PtoYUY2(const uint8_t *py, uint8_t *pu, uint8_t *pv, uint8_t *dst,
+void conv422PtoYUY2(const uint8_t* py, uint8_t* pu, uint8_t* pv, uint8_t* dst,
int pitch1Y, int pitch1UV, int pitch2, int width, int height)
{
width /= 2;
@@ -510,7 +510,7 @@ void conv422PtoYUY2(const uint8_t *py, uint8_t *pu, uint8_t *pv, uint8_t *dst,
}
-void convYUY2to422P(const uint8_t *src, uint8_t *py, uint8_t *pu, uint8_t *pv,
+void convYUY2to422P(const uint8_t* src, uint8_t* py, uint8_t* pu, uint8_t* pv,
int pitch1, int pitch2y, int pitch2uv, int width, int height)
{
width /= 2;
diff --git a/src/color_convert.h b/src/color_convert.h
index 8165350..74a6c16 100644
--- a/src/color_convert.h
+++ b/src/color_convert.h
@@ -3,13 +3,13 @@
#include
-void conv420to422P(const uint8_t *src, uint8_t *dst, int src_pitch, int dst_pitch,
+void conv420to422P(const uint8_t* src, uint8_t* dst, int src_pitch, int dst_pitch,
int width, int height);
-void conv420to422I(const uint8_t *src, uint8_t *dst, int src_pitch, int dst_pitch,
+void conv420to422I(const uint8_t* src, uint8_t* dst, int src_pitch, int dst_pitch,
int width, int height);
-void conv422to444(const uint8_t *src, uint8_t *dst, int src_pitch, int dst_pitch,
+void conv422to444(const uint8_t* src, uint8_t* dst, int src_pitch, int dst_pitch,
int width, int height);
#endif
diff --git a/src/d2vsource.rc b/src/d2vsource.rc
index 528c1df..3850f84 100644
--- a/src/d2vsource.rc
+++ b/src/d2vsource.rc
@@ -1,9 +1,8 @@
#include
-#include
VS_VERSION_INFO VERSIONINFO
-FILEVERSION 1,2,3,0
-PRODUCTVERSION 1,2,3,0
+FILEVERSION 1,2,4,0
+PRODUCTVERSION 1,2,4,0
FILEFLAGSMASK VS_FFI_FILEFLAGSMASK
FILEFLAGS 0x0L
FILEOS VOS__WINDOWS32
@@ -16,11 +15,11 @@ BEGIN
BEGIN
VALUE "Comments", "Modified DGDecode."
VALUE "FileDescription", "D2VSource for AviSynth 2.6 / AviSynth+"
- VALUE "FileVersion", "1.2.3"
+ VALUE "FileVersion", "1.2.4"
VALUE "InternalName", "D2VSource"
VALUE "OriginalFilename", "D2VSource.dll"
VALUE "ProductName", "D2VSource"
- VALUE "ProductVersion", "1.2.3"
+ VALUE "ProductVersion", "1.2.4"
END
END
BLOCK "VarFileInfo"
diff --git a/src/getbit.cpp b/src/getbit.cpp
index 4dc08ea..48ff81e 100644
--- a/src/getbit.cpp
+++ b/src/getbit.cpp
@@ -26,7 +26,7 @@ void CMPEG2Decoder::Initialize_Buffer()
{
Rdptr = Rdbfr + BUFFER_SIZE;
Rdmax = Rdptr;
- buffer_invalid = (uint8_t *)(UINTPTR_MAX);
+ buffer_invalid = (uint8_t*)(UINTPTR_MAX);
if (SystemStream_Flag)
{
@@ -52,7 +52,7 @@ void CMPEG2Decoder::Initialize_Buffer()
{
Fill_Buffer();
- CurrentBfr = (*Rdptr << 24) + (*(Rdptr+1) << 16) + (*(Rdptr+2) << 8) + *(Rdptr+3);
+ CurrentBfr = (*Rdptr << 24) + (*(Rdptr + 1) << 16) + (*(Rdptr + 2) << 8) + *(Rdptr + 3);
Rdptr += 4;
Fill_Next();
@@ -62,7 +62,7 @@ void CMPEG2Decoder::Initialize_Buffer()
}
-struct transport_packet{
+struct transport_packet {
// 1 byte
uint8_t sync_byte; // 8 bslbf
@@ -113,7 +113,7 @@ void CMPEG2Decoder::Next_Transport_Packet()
int Packet_Length; // bytes remaining in MPEG-2 transport packet
int Packet_Header_Length;
uint32_t code;
- transport_packet tp = {0};
+ transport_packet tp = { 0 };
for (;;)
{
@@ -131,19 +131,19 @@ void CMPEG2Decoder::Next_Transport_Packet()
}
// 1) Search for a sync byte. Gives some protection against emulation.
- for(;;)
+ for (;;)
{
if ((tp.sync_byte = Get_Byte()) != 0x47)
continue;
if (Rdptr - Rdbfr > TransportPacketSize)
{
- if (Rdptr[-(TransportPacketSize+1)] == 0x47)
+ if (Rdptr[-(TransportPacketSize + 1)] == 0x47)
break;
}
else if (Rdbfr + Read - Rdptr > TransportPacketSize - static_cast(1))
{
- if (Rdptr[+(TransportPacketSize-1)] == 0x47)
+ if (Rdptr[+(TransportPacketSize - 1)] == 0x47)
break;
}
else
@@ -166,29 +166,29 @@ void CMPEG2Decoder::Next_Transport_Packet()
code = Get_Byte();
--Packet_Length; // decrement the 1 byte we just got;
tp.transport_scrambling_control = (code >> 6) & 0x03;// 2 bslbf
- tp.adaptation_field_control = (code >> 4 ) & 0x03;// 2 bslbf
+ tp.adaptation_field_control = (code >> 4) & 0x03;// 2 bslbf
tp.continuity_counter = code & 0x0F;// 4 uimsbf
// 4) check for early-exit conditions ... (possibly skip packet)
// we don't care about the continuity counter
// if ( tp.continuity_counter != previous_continuity_counter ) ...
- if ( tp.transport_error_indicator ||
- (tp.adaptation_field_control == 0) )
+ if (tp.transport_error_indicator ||
+ (tp.adaptation_field_control == 0))
{
// skip remaining bytes in current packet
- SKIP_TRANSPORT_PACKET_BYTES( Packet_Length )
- continue; // abort, and circle back to top of 'for() loop'
+ SKIP_TRANSPORT_PACKET_BYTES(Packet_Length)
+ continue; // abort, and circle back to top of 'for() loop'
}
// 5) check
- if ( tp.adaptation_field_control == 2 || tp.adaptation_field_control == 3)
+ if (tp.adaptation_field_control == 2 || tp.adaptation_field_control == 3)
{
// adaptation field is present
tp.adaptation_field_length = Get_Byte(); // 8-bits
--Packet_Length; // decrement the 1 byte we just got;
- if ( tp.adaptation_field_length != 0 ) // end of field already?
+ if (tp.adaptation_field_length != 0) // end of field already?
{
// if we made it this far, we no longer need to decrement
// Packet_Length. We took care of it up there!
@@ -204,56 +204,56 @@ void CMPEG2Decoder::Next_Transport_Packet()
tp.adaptation_field_extension_flag = (code >> 0) & 0x01; // 1 bslbf
// skip the remainder of the adaptation_field
- SKIP_TRANSPORT_PACKET_BYTES( tp.adaptation_field_length-1 )
+ SKIP_TRANSPORT_PACKET_BYTES(tp.adaptation_field_length - 1)
} // if ( tp.adaptation_field_length != 0 )
} // if ( tp.adaptation_field_control != 1 )
// we've processed the header, so now just the payload is left...
// video
- if ( tp.pid == MPEG2_Transport_VideoPID && Packet_Length > 0)
+ if (tp.pid == MPEG2_Transport_VideoPID && Packet_Length > 0)
{
#if 0
code = Get_Short();
- code = (code & 0xffff)<<16 | Get_Short();
+ code = (code & 0xffff) << 16 | Get_Short();
Packet_Length = Packet_Length - 4; // remove these two bytes
// Packet start?
- if (code < 0x000001E0 || code > 0x000001EF )
- if (!tp.payload_unit_start_indicator)
- {
- // No, move the buffer-pointer back.
- Rdptr -= 4;
- Packet_Length = Packet_Length + 4; // restore these four bytes
- }
- else
-#endif
- if (tp.payload_unit_start_indicator)
- {
- // YES, pull out PTS
- //Get_Short();
- //Get_Short();
- //Get_Short(); // MPEG2-PES total Packet_Length
- //Get_Byte(); // skip a byte
- Rdptr += 7;
- code = Get_Byte();
- Packet_Header_Length = Get_Byte();
- Packet_Length = Packet_Length - 9; // compensate the bytes we extracted
-
- // get PTS, and skip rest of PES-header
- if (code>=0x80 && Packet_Header_Length > 4 ) // Extension_flag ?
+ if (code < 0x000001E0 || code > 0x000001EF)
+ if (!tp.payload_unit_start_indicator)
{
- // Skip PES_PTS
- //Get_Short();
- //Get_Short();
- Rdptr += 4;
- Get_Byte();
- Packet_Length = Packet_Length - 5;
- SKIP_TRANSPORT_PACKET_BYTES( Packet_Header_Length - static_cast(5) )
+ // No, move the buffer-pointer back.
+ Rdptr -= 4;
+ Packet_Length = Packet_Length + 4; // restore these four bytes
}
else
- SKIP_TRANSPORT_PACKET_BYTES( Packet_Header_Length )
- }
+#endif
+ if (tp.payload_unit_start_indicator)
+ {
+ // YES, pull out PTS
+ //Get_Short();
+ //Get_Short();
+ //Get_Short(); // MPEG2-PES total Packet_Length
+ //Get_Byte(); // skip a byte
+ Rdptr += 7;
+ code = Get_Byte();
+ Packet_Header_Length = Get_Byte();
+ Packet_Length = Packet_Length - 9; // compensate the bytes we extracted
+
+ // get PTS, and skip rest of PES-header
+ if (code >= 0x80 && Packet_Header_Length > 4) // Extension_flag ?
+ {
+ // Skip PES_PTS
+ //Get_Short();
+ //Get_Short();
+ Rdptr += 4;
+ Get_Byte();
+ Packet_Length = Packet_Length - 5;
+ SKIP_TRANSPORT_PACKET_BYTES(Packet_Header_Length - static_cast(5))
+ }
+ else
+ SKIP_TRANSPORT_PACKET_BYTES(Packet_Header_Length)
+ }
Rdmax = Rdptr + Packet_Length;
if (TransportPacketSize == 204)
Rdmax -= 16;
@@ -262,7 +262,7 @@ void CMPEG2Decoder::Next_Transport_Packet()
// fall through case
// skip the remainder of the adaptation_field
- SKIP_TRANSPORT_PACKET_BYTES( Packet_Length )
+ SKIP_TRANSPORT_PACKET_BYTES(Packet_Length)
} // for
}
@@ -329,7 +329,7 @@ void CMPEG2Decoder::Next_PVA_Packet()
{
// The spec is unclear about the significance of the prebytes field.
// It appears to be safe to ignore it.
- PTS = (int) ((Get_Byte() << 24) | (Get_Byte() << 16) | (Get_Byte() << 8) | Get_Byte());
+ PTS = (int)((Get_Byte() << 24) | (Get_Byte() << 16) | (Get_Byte() << 8) | Get_Byte());
Packet_Length -= 4;
}
@@ -345,12 +345,12 @@ void CMPEG2Decoder::Next_PVA_Packet()
void CMPEG2Decoder::Next_Packet()
{
- if ( SystemStream_Flag == 2 ) // MPEG-2 transport packet?
+ if (SystemStream_Flag == 2) // MPEG-2 transport packet?
{
Next_Transport_Packet();
return;
}
- else if ( SystemStream_Flag == 3 ) // PVA packet?
+ else if (SystemStream_Flag == 3) // PVA packet?
{
Next_PVA_Packet();
return;
@@ -360,7 +360,7 @@ void CMPEG2Decoder::Next_Packet()
static int stream_type;
while (true) {
code = Get_Short();
- code = (code<<16) + Get_Short();
+ code = (code << 16) + Get_Short();
// remove system layer byte stuffing
while ((code & 0xffffff00) != 0x00000100) {
@@ -373,11 +373,13 @@ void CMPEG2Decoder::Next_Packet()
if ((Get_Byte() & 0xf0) == 0x20) {
Rdptr += 7; // MPEG1 program stream
stream_type = MPEG1_PROGRAM_STREAM;
- } else {
+ }
+ else {
Rdptr += 8; // MPEG2 program stream
stream_type = MPEG2_PROGRAM_STREAM;
}
- } else if ((code & 0xfffffff0) == VIDEO_ELEMENTARY_STREAM) {
+ }
+ else if ((code & 0xfffffff0) == VIDEO_ELEMENTARY_STREAM) {
Packet_Length = Get_Short();
Rdmax = Rdptr + Packet_Length;
@@ -400,7 +402,8 @@ void CMPEG2Decoder::Next_Packet()
Get_Short();
Get_Short();
Packet_Header_Length += 4;
- } else if ((code & 0xf0) == 0x30) {
+ }
+ else if ((code & 0xf0) == 0x30) {
// PTS/DTS bytes.
Get_Short();
Get_Short();
@@ -410,10 +413,11 @@ void CMPEG2Decoder::Next_Packet()
Packet_Header_Length += 9;
}
return;
- } else {
+ }
+ else {
// MPEG2 program stream.
code = Get_Byte();
- if ((code & 0xc0)==0x80)
+ if ((code & 0xc0) == 0x80)
{
//code = Get_Byte();
++Rdptr;
@@ -426,7 +430,7 @@ void CMPEG2Decoder::Next_Packet()
Rdptr += Packet_Length - 1;
}
}
- else if (code>=SYSTEM_START_CODE)
+ else if (code >= SYSTEM_START_CODE)
{
code = Get_Short();
Rdptr += code;
@@ -438,9 +442,10 @@ void CMPEG2Decoder::Next_Packet()
void CMPEG2Decoder::Next_File()
{
if (File_Flag < static_cast(Infile.size() - 1)) {
- File_Flag ++;
+ File_Flag++;
- } else {
+ }
+ else {
File_Flag = 0;
}
// Even if we ran out of files, we reread the first one, just so
@@ -450,7 +455,7 @@ void CMPEG2Decoder::Next_File()
int bytes = _read(Infile[File_Flag], Rdbfr + Read, BUFFER_SIZE - Read);
if (Read + static_cast(bytes) == BUFFER_SIZE)
// The whole buffer has valid data.
- buffer_invalid = (uint8_t *)(UINTPTR_MAX);
+ buffer_invalid = (uint8_t*)(UINTPTR_MAX);
else
// Point to the first invalid buffer location.
buffer_invalid = Rdbfr + Read + bytes;
diff --git a/src/gethdr.cpp b/src/gethdr.cpp
index a719124..03c07ee 100644
--- a/src/gethdr.cpp
+++ b/src/gethdr.cpp
@@ -29,7 +29,7 @@
#include "MPEG2Decoder.h"
-/* decode headers from one input stream */
+ /* decode headers from one input stream */
int CMPEG2Decoder::Get_Hdr()
{
for (;;)
@@ -86,17 +86,17 @@ __forceinline void CMPEG2Decoder::group_of_pictures_header()
/* ISO/IEC 13818-2 section 6.2.3 */
inline void CMPEG2Decoder::picture_header()
{
- temporal_reference = Get_Bits(10);
+ temporal_reference = Get_Bits(10);
picture_coding_type = Get_Bits(3);
Flush_Buffer(16);//Get_Bits(16); //vbv_delay
- if (picture_coding_type==P_TYPE || picture_coding_type==B_TYPE)
+ if (picture_coding_type == P_TYPE || picture_coding_type == B_TYPE)
{
full_pel_forward_vector = Get_Bits(1);
forward_f_code = Get_Bits(3);
}
- if (picture_coding_type==B_TYPE)
+ if (picture_coding_type == B_TYPE)
{
full_pel_backward_vector = Get_Bits(1);
backward_f_code = Get_Bits(3);
@@ -127,7 +127,7 @@ void CMPEG2Decoder::Sequence_Header()
int i;
horizontal_size = Get_Bits(12);
- vertical_size = Get_Bits(12);
+ vertical_size = Get_Bits(12);
#if 0
Get_Bits(4); //aspect_ratio_information
Get_Bits(4); //frame_rate_code
@@ -142,28 +142,28 @@ void CMPEG2Decoder::Sequence_Header()
if ((load_intra_quantizer_matrix = Get_Bits(1)))
{
- for (i=0; i<64; i++)
+ for (i = 0; i < 64; i++)
intra_quantizer_matrix[scan[ZIG_ZAG][i]] = Get_Bits(8);
}
else
{
- for (i=0; i<64; i++)
+ for (i = 0; i < 64; i++)
intra_quantizer_matrix[i] = default_intra_quantizer_matrix[i];
}
if ((load_non_intra_quantizer_matrix = Get_Bits(1)))
{
- for (i=0; i<64; i++)
+ for (i = 0; i < 64; i++)
non_intra_quantizer_matrix[scan[ZIG_ZAG][i]] = Get_Bits(8);
}
else
{
- for (i=0; i<64; i++)
+ for (i = 0; i < 64; i++)
non_intra_quantizer_matrix[i] = 16;
}
/* copy luminance to chrominance matrices */
- for (i=0; i<64; i++)
+ for (i = 0; i < 64; i++)
{
chroma_intra_quantizer_matrix[i] = intra_quantizer_matrix[i];
chroma_non_intra_quantizer_matrix[i] = non_intra_quantizer_matrix[i];
@@ -189,7 +189,7 @@ int CMPEG2Decoder::slice_header()
int quantizer_scale_code = Get_Bits(5);
if (mpeg_type == IS_MPEG2)
- quantizer_scale = q_scale_type ? Non_Linear_quantizer_scale[quantizer_scale_code] : quantizer_scale_code<<1;
+ quantizer_scale = q_scale_type ? Non_Linear_quantizer_scale[quantizer_scale_code] : quantizer_scale_code << 1;
else
quantizer_scale = quantizer_scale_code;
@@ -206,11 +206,11 @@ void CMPEG2Decoder::extension_and_user_data()
Next_Start_Code();
- while ((code = Show_Bits(32))==EXTENSION_START_CODE || code==USER_DATA_START_CODE)
+ while ((code = Show_Bits(32)) == EXTENSION_START_CODE || code == USER_DATA_START_CODE)
{
if (Fault_Flag == OUT_OF_BITS) return;
- if (code==EXTENSION_START_CODE)
+ if (code == EXTENSION_START_CODE)
{
Flush_Buffer(32);
ext_ID = Get_Bits(4);
@@ -256,10 +256,10 @@ void CMPEG2Decoder::extension_and_user_data()
__forceinline void CMPEG2Decoder::sequence_extension()
{
Flush_Buffer(8); //Get_Bits(8); //profile_and_level_indication
- progressive_sequence = Get_Bits(1);
- chroma_format = Get_Bits(2);
+ progressive_sequence = Get_Bits(1);
+ chroma_format = Get_Bits(2);
int horizontal_size_extension = Get_Bits(2) << 12;
- int vertical_size_extension = Get_Bits(2) << 12;
+ int vertical_size_extension = Get_Bits(2) << 12;
#if 0
Get_Bits(12); //bit_rate_extension
Flush_Buffer(1); // marker bit
@@ -305,21 +305,21 @@ void CMPEG2Decoder::quant_matrix_extension()
int i;
if ((load_intra_quantizer_matrix = Get_Bits(1)))
- for (i=0; i<64; i++)
+ for (i = 0; i < 64; i++)
chroma_intra_quantizer_matrix[scan[ZIG_ZAG][i]]
- = intra_quantizer_matrix[scan[ZIG_ZAG][i]] = Get_Bits(8);
+ = intra_quantizer_matrix[scan[ZIG_ZAG][i]] = Get_Bits(8);
if ((load_non_intra_quantizer_matrix = Get_Bits(1)))
- for (i=0; i<64; i++)
+ for (i = 0; i < 64; i++)
chroma_non_intra_quantizer_matrix[scan[ZIG_ZAG][i]]
- = non_intra_quantizer_matrix[scan[ZIG_ZAG][i]] = Get_Bits(8);
+ = non_intra_quantizer_matrix[scan[ZIG_ZAG][i]] = Get_Bits(8);
if ((load_chroma_intra_quantizer_matrix = Get_Bits(1)))
- for (i=0; i<64; i++)
+ for (i = 0; i < 64; i++)
chroma_intra_quantizer_matrix[scan[ZIG_ZAG][i]] = Get_Bits(8);
if ((load_chroma_non_intra_quantizer_matrix = Get_Bits(1)))
- for (i=0; i<64; i++)
+ for (i = 0; i < 64; i++)
chroma_non_intra_quantizer_matrix[scan[ZIG_ZAG][i]] = Get_Bits(8);
}
@@ -336,7 +336,7 @@ void CMPEG2Decoder::picture_display_extension()
/* based on ISO/IEC 13818-2 section 6.3.12
(November 1994) Picture display extensions */
- /* derive number_of_frame_center_offsets */
+ /* derive number_of_frame_center_offsets */
if (progressive_sequence)
{
if (repeat_first_field)
@@ -351,7 +351,7 @@ void CMPEG2Decoder::picture_display_extension()
}
else
{
- if (picture_structure!=FRAME_PICTURE)
+ if (picture_structure != FRAME_PICTURE)
number_of_frame_center_offsets = 1;
else
{
@@ -363,7 +363,7 @@ void CMPEG2Decoder::picture_display_extension()
}
/* now parse */
- for (i=0; i
#include "global.h"
@@ -71,12 +71,12 @@ void CMPEG2Decoder::Decode_Picture(YV12PICT& dst)
void CMPEG2Decoder::update_picture_buffers()
{
int cc; /* color component index */
- uint8_t *tmp; /* temporary swap pointer */
+ uint8_t* tmp; /* temporary swap pointer */
- for (cc=0; cc<3; cc++)
+ for (cc = 0; cc < 3; cc++)
{
/* B pictures do not need to be save for future reference */
- if (picture_coding_type==B_TYPE)
+ if (picture_coding_type == B_TYPE)
{
current_frame[cc] = auxframe[cc];
}
@@ -102,8 +102,8 @@ void CMPEG2Decoder::update_picture_buffers()
current_frame[cc] = backward_reference_frame[cc];
}
- if (picture_structure==BOTTOM_FIELD)
- current_frame[cc] += (cc==0) ? Coded_Picture_Width : Chroma_Width;
+ if (picture_structure == BOTTOM_FIELD)
+ current_frame[cc] += (cc == 0) ? Coded_Picture_Width : Chroma_Width;
}
}
@@ -162,7 +162,7 @@ inline void CMPEG2Decoder::slice(int MBAmax, uint32_t code)
// Set up pointer for storing quants for info and showQ.
int* qp = (picture_coding_type == B_TYPE) ? auxQP : backwardQP;
if (picture_structure == BOTTOM_FIELD)
- qp += mb_width*mb_height / 2;
+ qp += mb_width * mb_height / 2;
// This while loop condition just prevents us from processing more than
// the maximum number of macroblocks possible in a picture. The loop is
@@ -186,8 +186,9 @@ inline void CMPEG2Decoder::slice(int MBAmax, uint32_t code)
}
if (MBAinc == 1) {
decode_macroblock(macroblock_type, motion_type, dct_type, PMV,
- dc_dct_pred, motion_vertical_field_select, dmvector);
- } else {
+ dc_dct_pred, motion_vertical_field_select, dmvector);
+ }
+ else {
/* ISO/IEC 13818-2 section 7.6.6 */
skipped_macroblock(dc_dct_pred, PMV, motion_type, motion_vertical_field_select, macroblock_type);
}
@@ -200,7 +201,7 @@ inline void CMPEG2Decoder::slice(int MBAmax, uint32_t code)
/* ISO/IEC 13818-2 section 7.6 */
motion_compensation(MBA, macroblock_type, motion_type, PMV,
- motion_vertical_field_select, dmvector, dct_type);
+ motion_vertical_field_select, dmvector, dct_type);
/* advance to next macroblock */
++MBA;
@@ -210,8 +211,8 @@ inline void CMPEG2Decoder::slice(int MBAmax, uint32_t code)
/* ISO/IEC 13818-2 section 6.3.17.1: Macroblock modes */
void CMPEG2Decoder::macroblock_modes(int& macroblock_type, int& motion_type,
- int& motion_vector_count, int& mv_format,
- int& dmv, int& mvscale, int& dct_type)
+ int& motion_vector_count, int& mv_format,
+ int& dmv, int& mvscale, int& dct_type)
{
/* get macroblock_type */
macroblock_type = Get_macroblock_type();
@@ -219,14 +220,16 @@ void CMPEG2Decoder::macroblock_modes(int& macroblock_type, int& motion_type,
return;
/* get frame/field motion type */
- if (macroblock_type & (MACROBLOCK_MOTION_FORWARD|MACROBLOCK_MOTION_BACKWARD)) {
+ if (macroblock_type & (MACROBLOCK_MOTION_FORWARD | MACROBLOCK_MOTION_BACKWARD)) {
if (picture_structure == FRAME_PICTURE && frame_pred_frame_dct)
motion_type = MC_FRAME;
else
motion_type = Get_Bits(2);
- } else if (picture_structure == FRAME_PICTURE) {
+ }
+ else if (picture_structure == FRAME_PICTURE) {
motion_type = MC_FRAME;
- } else {
+ }
+ else {
motion_type = MC_FIELD;
}
@@ -234,7 +237,8 @@ void CMPEG2Decoder::macroblock_modes(int& macroblock_type, int& motion_type,
if (picture_structure == FRAME_PICTURE) {
motion_vector_count = (motion_type == MC_FIELD) ? 2 : 1;
mv_format = (motion_type == MC_FRAME) ? MV_FRAME : MV_FIELD;
- } else {
+ }
+ else {
motion_vector_count = (motion_type == MC_16X8) ? 2 : 1;
mv_format = MV_FIELD;
}
@@ -248,8 +252,8 @@ void CMPEG2Decoder::macroblock_modes(int& macroblock_type, int& motion_type,
mvscale = (mv_format == MV_FIELD && picture_structure == FRAME_PICTURE);
/* get dct_type (frame DCT / field DCT) */
- dct_type = (picture_structure==FRAME_PICTURE) && (!frame_pred_frame_dct)
- && (macroblock_type & (MACROBLOCK_PATTERN|MACROBLOCK_INTRA)) ? Get_Bits(1) : 0;
+ dct_type = (picture_structure == FRAME_PICTURE) && (!frame_pred_frame_dct)
+ && (macroblock_type & (MACROBLOCK_PATTERN | MACROBLOCK_INTRA)) ? Get_Bits(1) : 0;
}
/* move/add 8x8-Block from block[comp] to backward_reference_frame */
@@ -282,29 +286,34 @@ void CMPEG2Decoder::add_block(int count, int bx, int by, int dct_type, int addfl
if (dct_type) {
rfp = current_frame[0] + Coded_Picture_Width * (by + (comp & 2) / static_cast(2)) + bx + (comp & static_cast(1)) * 8;
iincr = Coded_Picture_Width * 2;
- } else {
+ }
+ else {
rfp = current_frame[0] + Coded_Picture_Width * (by + (comp & 2) * static_cast(4)) + bx + (comp & static_cast(1)) * 8;
iincr = Coded_Picture_Width;
}
- } else {
+ }
+ else {
rfp = current_frame[0] + (Coded_Picture_Width * static_cast(2)) * (by + (comp & 2) * static_cast(4)) + bx + (comp & 1) * static_cast(8);
iincr = Coded_Picture_Width * 2;
}
- } else {
+ }
+ else {
if (chroma_format != CHROMA444) bxh /= 2;
- if (chroma_format==CHROMA420) byh /= 2;
+ if (chroma_format == CHROMA420) byh /= 2;
- if (picture_structure==FRAME_PICTURE) {
+ if (picture_structure == FRAME_PICTURE) {
if (dct_type && chroma_format != CHROMA420) {
/* field DCT coding */
rfp = current_frame[cc] + Chroma_Width * (byh + (comp & 2) / static_cast(2)) + bxh + (comp & 8);
iincr = Chroma_Width * 2;
- } else {
+ }
+ else {
/* frame DCT coding */
rfp = current_frame[cc] + Chroma_Width * (byh + (comp & 2) * static_cast(4)) + bxh + (comp & 8);
iincr = Chroma_Width;
}
- } else {
+ }
+ else {
/* field picture */
rfp = current_frame[cc] + (Chroma_Width * static_cast(2)) * (byh + (comp & 2) * static_cast(4)) + bxh + (comp & 8);
iincr = Chroma_Width * 2;
@@ -358,7 +367,8 @@ void CMPEG2Decoder::add_block(int count, int bx, int by, int dct_type, int addfl
_mm_storel_epi64(reinterpret_cast<__m128i*>(rfp), _mm_packus_epi16(r0, r0));
_mm_storel_epi64(reinterpret_cast<__m128i*>(rfp1), _mm_packus_epi16(r1, r1));
- } else {
+ }
+ else {
r0 = _mm_load_si128(reinterpret_cast<const __m128i*>(blockp));
r1 = _mm_load_si128(reinterpret_cast<const __m128i*>(blockp + 8));
r0 = _mm_add_epi8(_mm_packs_epi16(r0, r1), mask);
@@ -413,15 +423,15 @@ void CMPEG2Decoder::clear_block(int count)
/* ISO/IEC 13818-2 section 7.6 */
void CMPEG2Decoder::motion_compensation(int MBA, int macroblock_type, int motion_type,
- int PMV[2][2][2], int motion_vertical_field_select[2][2],
- int dmvector[2], int dct_type)
+ int PMV[2][2][2], int motion_vertical_field_select[2][2],
+ int dmvector[2], int dct_type)
{
prefetchTables();
/* derive current macroblock position within picture */
/* ISO/IEC 13818-2 section 6.3.1.6 and 6.3.1.7 */
- int bx = 16*(MBA%mb_width);
- int by = 16*(MBA/mb_width);
+ int bx = 16 * (MBA % mb_width);
+ int by = 16 * (MBA / mb_width);
/* motion compensation */
if (!(macroblock_type & MACROBLOCK_INTRA))
@@ -435,24 +445,24 @@ void CMPEG2Decoder::motion_compensation(int MBA, int macroblock_type, int motion
}
idctFunction(block[block_count - 1]);
- add_block(block_count, bx, by, dct_type, (macroblock_type & MACROBLOCK_INTRA)==0);
+ add_block(block_count, bx, by, dct_type, (macroblock_type & MACROBLOCK_INTRA) == 0);
}
/* ISO/IEC 13818-2 section 7.6.6 */
void CMPEG2Decoder::skipped_macroblock(int dc_dct_pred[3], int PMV[2][2][2], int& motion_type,
- int motion_vertical_field_select[2][2], int& macroblock_type)
+ int motion_vertical_field_select[2][2], int& macroblock_type)
{
clear_block(block_count);
/* reset intra_dc predictors */
/* ISO/IEC 13818-2 section 7.2.1: DC coefficients in intra blocks */
- dc_dct_pred[0] = dc_dct_pred[1] = dc_dct_pred[2]=0;
+ dc_dct_pred[0] = dc_dct_pred[1] = dc_dct_pred[2] = 0;
/* reset motion vector predictors */
/* ISO/IEC 13818-2 section 7.6.3.4: Resetting motion vector predictors */
if (picture_coding_type == P_TYPE)
- PMV[0][0][0]=PMV[0][0][1]=PMV[1][0][0]=PMV[1][0][1]=0;
+ PMV[0][0][0] = PMV[0][0][1] = PMV[1][0][0] = PMV[1][0][1] = 0;
/* derive motion_type */
if (picture_structure == FRAME_PICTURE)
@@ -476,15 +486,15 @@ void CMPEG2Decoder::skipped_macroblock(int dc_dct_pred[3], int PMV[2][2][2], int
/* ISO/IEC 13818-2 sections 7.2 through 7.5 */
void CMPEG2Decoder::decode_macroblock(int& macroblock_type, int& motion_type, int& dct_type,
- int PMV[2][2][2], int dc_dct_pred[3],
- int motion_vertical_field_select[2][2], int dmvector[2])
+ int PMV[2][2][2], int dc_dct_pred[3],
+ int motion_vertical_field_select[2][2], int dmvector[2])
{
int quantizer_scale_code, comp, motion_vector_count, mv_format;
int dmv, mvscale, coded_block_pattern;
/* ISO/IEC 13818-2 section 6.3.17.1: Macroblock modes */
macroblock_modes(macroblock_type, motion_type, motion_vector_count, mv_format,
- dmv, mvscale, dct_type);
+ dmv, mvscale, dct_type);
if (Fault_Flag)
{
return; // go to next slice
@@ -514,11 +524,11 @@ void CMPEG2Decoder::decode_macroblock(int& macroblock_type, int& motion_type, in
if (mpeg_type == IS_MPEG2)
{
motion_vectors(PMV, dmvector, motion_vertical_field_select, 0,
- motion_vector_count, mv_format, f_code[0][0]-1, f_code[0][1]-1, dmv, mvscale);
+ motion_vector_count, mv_format, f_code[0][0] - 1, f_code[0][1] - 1, dmv, mvscale);
}
else
{
- motion_vector(PMV[0][0], dmvector, forward_f_code-1, forward_f_code-1, dmv, mvscale, full_pel_forward_vector);
+ motion_vector(PMV[0][0], dmvector, forward_f_code - 1, forward_f_code - 1, dmv, mvscale, full_pel_forward_vector);
}
}
if (Fault_Flag)
@@ -532,11 +542,11 @@ void CMPEG2Decoder::decode_macroblock(int& macroblock_type, int& motion_type, in
if (mpeg_type == IS_MPEG2)
{
motion_vectors(PMV, dmvector, motion_vertical_field_select, 1,
- motion_vector_count,mv_format, f_code[1][0]-1, f_code[1][1]-1, 0, mvscale);
+ motion_vector_count, mv_format, f_code[1][0] - 1, f_code[1][1] - 1, 0, mvscale);
}
else
{
- motion_vector(PMV[0][1], dmvector, backward_f_code-1, backward_f_code-1, dmv, mvscale, full_pel_backward_vector);
+ motion_vector(PMV[0][1], dmvector, backward_f_code - 1, backward_f_code - 1, dmv, mvscale, full_pel_backward_vector);
}
}
if (Fault_Flag)
@@ -553,13 +563,13 @@ void CMPEG2Decoder::decode_macroblock(int& macroblock_type, int& motion_type, in
{
coded_block_pattern = Get_coded_block_pattern();
- if (chroma_format==CHROMA422)
- coded_block_pattern = (coded_block_pattern<<2) | Get_Bits(2);
- else if (chroma_format==CHROMA444)
- coded_block_pattern = (coded_block_pattern<<6) | Get_Bits(6);
+ if (chroma_format == CHROMA422)
+ coded_block_pattern = (coded_block_pattern << 2) | Get_Bits(2);
+ else if (chroma_format == CHROMA444)
+ coded_block_pattern = (coded_block_pattern << 6) | Get_Bits(6);
}
else
- coded_block_pattern = (macroblock_type & MACROBLOCK_INTRA) ? (1<=16384)
- tab = &DCTtabnext[(code>>12)-4];
+ if (code >= 16384)
+ tab = &DCTtabnext[(code >> 12) - 4];
else if (code >= 1024)
- tab = &DCTtab0[(code>>8)-4];
+ tab = &DCTtab0[(code >> 8) - 4];
else if (code >= 512)
- tab = &DCTtab1[(code>>6)-8];
+ tab = &DCTtab1[(code >> 6) - 8];
else if (code >= 256)
- tab = &DCTtab2[(code>>4)-16];
+ tab = &DCTtab2[(code >> 4) - 16];
else if (code >= 128)
- tab = &DCTtab3[(code>>3)-16];
+ tab = &DCTtab3[(code >> 3) - 16];
else if (code >= 64)
- tab = &DCTtab4[(code>>2)-16];
+ tab = &DCTtab4[(code >> 2) - 16];
else if (code >= 32)
- tab = &DCTtab5[(code>>1)-16];
+ tab = &DCTtab5[(code >> 1) - 16];
else if (code >= 16)
- tab = &DCTtab6[code-16];
+ tab = &DCTtab6[code - 16];
else
{
Fault_Flag = 1;
@@ -694,7 +704,7 @@ void CMPEG2Decoder::decode_mpeg1_intra_block(int comp, int dc_dct_pred[])
if (val == 65)
{
// escape
- i+= Get_Bits(6);
+ i += Get_Bits(6);
val = Get_Bits(8);
if (val == 0)
val = Get_Bits(8);
@@ -704,7 +714,7 @@ void CMPEG2Decoder::decode_mpeg1_intra_block(int comp, int dc_dct_pred[])
val -= 256;
sign = (val < 0);
if (sign)
- val = - val;
+ val = -val;
}
else
{
@@ -727,45 +737,45 @@ void CMPEG2Decoder::decode_mpeg1_intra_block(int comp, int dc_dct_pred[])
if (val >= 2048) val = 2047 + sign; // saturation
if (sign)
val = -val;
- bp[j] = (int16_t) val;
+ bp[j] = (int16_t)val;
}
}
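The run/level decoders above all dispatch on the magnitude of the next 16 bits to choose a DCT VLC table. A minimal sketch of that dispatch, assuming a hypothetical TabRef struct in place of the plugin's DCTtab pointers and with `code` standing in for Show_Bits(16):

```cpp
#include <cstdint>

// Hypothetical descriptor: which table family and which index to consult.
// The real decoder indexes DCTtabnext/DCTtab0..DCTtab6 directly.
struct TabRef { int table; int index; };

static TabRef select_dct_table(uint32_t code) {
    if (code >= 16384) return { 0, int(code >> 12) - 4 };  // DCTtabnext
    if (code >= 1024)  return { 1, int(code >> 8) - 4 };   // DCTtab0
    if (code >= 512)   return { 2, int(code >> 6) - 8 };   // DCTtab1
    if (code >= 256)   return { 3, int(code >> 4) - 16 };  // DCTtab2
    if (code >= 128)   return { 4, int(code >> 3) - 16 };  // DCTtab3
    if (code >= 64)    return { 5, int(code >> 2) - 16 };  // DCTtab4
    if (code >= 32)    return { 6, int(code >> 1) - 16 };  // DCTtab5
    if (code >= 16)    return { 7, int(code) - 16 };       // DCTtab6
    return { -1, 0 };                                      // invalid code -> Fault_Flag
}
```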
/* decode one non-intra coded MPEG-1 block */
void CMPEG2Decoder::decode_mpeg1_non_intra_block(int comp)
{
- int32_t code, val=0, i, j, sign;
- const DCTtab *tab;
- int16_t *bp;
+ int32_t code, val = 0, i, j, sign;
+ const DCTtab* tab;
+ int16_t* bp;
bp = block[comp];
/* decode AC coefficients */
- for (i=0; ; i++)
+ for (i = 0; ; i++)
{
code = Show_Bits(16);
if (code >= 16384)
{
if (i)
- tab = &DCTtabnext[(code>>12)-4];
+ tab = &DCTtabnext[(code >> 12) - 4];
else
- tab = &DCTtabfirst[(code>>12)-4];
+ tab = &DCTtabfirst[(code >> 12) - 4];
}
else if (code >= 1024)
- tab = &DCTtab0[(code>>8)-4];
+ tab = &DCTtab0[(code >> 8) - 4];
else if (code >= 512)
- tab = &DCTtab1[(code>>6)-8];
+ tab = &DCTtab1[(code >> 6) - 8];
else if (code >= 256)
- tab = &DCTtab2[(code>>4)-16];
+ tab = &DCTtab2[(code >> 4) - 16];
else if (code >= 128)
- tab = &DCTtab3[(code>>3)-16];
+ tab = &DCTtab3[(code >> 3) - 16];
else if (code >= 64)
- tab = &DCTtab4[(code>>2)-16];
+ tab = &DCTtab4[(code >> 2) - 16];
else if (code >= 32)
- tab = &DCTtab5[(code>>1)-16];
+ tab = &DCTtab5[(code >> 1) - 16];
else if (code >= 16)
- tab = &DCTtab6[code-16];
+ tab = &DCTtab6[code - 16];
else
{
Fault_Flag = 1;
@@ -778,7 +788,7 @@ void CMPEG2Decoder::decode_mpeg1_non_intra_block(int comp)
if (val == 65)
{
// escape
- i+= Get_Bits(6);
+ i += Get_Bits(6);
val = Get_Bits(8);
if (val == 0)
val = Get_Bits(8);
@@ -787,9 +797,9 @@ void CMPEG2Decoder::decode_mpeg1_non_intra_block(int comp)
else if (val > 128)
val -= 256;
- sign = (val<0);
+ sign = (val < 0);
if (sign)
- val = - val;
+ val = -val;
}
else
{
@@ -806,14 +816,14 @@ void CMPEG2Decoder::decode_mpeg1_non_intra_block(int comp)
}
j = scan[0][i];
- val = (((val<<1)+1) * quantizer_scale * non_intra_quantizer_matrix[j]) >> 4;
+ val = (((val << 1) + 1) * quantizer_scale * non_intra_quantizer_matrix[j]) >> 4;
if (val)
val = (val - 1) | 1; // mismatch
if (val >= 2048)
val = 2047 + sign; //saturation
if (sign)
val = -val;
- bp[j] = (int16_t) val;
+ bp[j] = (int16_t)val;
}
}
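For reference, the MPEG-1 non-intra reconstruction in the hunks above computes (2*level+1)*quantizer_scale*W[j]/16, forces the result odd, and saturates. A hedged, self-contained restatement with made-up inputs (the function name and sample values are not from the source):

```cpp
#include <cstdio>

// Mirrors the reconstruction in decode_mpeg1_non_intra_block above.
static int reconstruct_mpeg1_non_intra(int level, int sign, int quant, int weight) {
    int val = (((level << 1) + 1) * quant * weight) >> 4; // (2*level+1)*q*W / 16
    if (val)
        val = (val - 1) | 1;   // oddification (mismatch control)
    if (val >= 2048)
        val = 2047 + sign;     // saturate to [-2048, 2047]
    return sign ? -val : val;
}

int main() {
    // level=3, positive sign, quantizer_scale=8, matrix weight=16 -> 56 -> 55
    std::printf("%d\n", reconstruct_mpeg1_non_intra(3, 0, 8, 16));
}
```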
@@ -821,12 +831,12 @@ void CMPEG2Decoder::decode_mpeg1_non_intra_block(int comp)
void CMPEG2Decoder::Decode_MPEG2_Intra_Block(int comp, int dc_dct_pred[])
{
int32_t code, val = 0, i, j, sign, sum;
- const DCTtab *tab;
- int16_t *bp;
- int *qmat;
+ const DCTtab* tab;
+ int16_t* bp;
+ int* qmat;
bp = block[comp];
- qmat = (comp<4 || chroma_format==CHROMA420)
+ qmat = (comp < 4 || chroma_format == CHROMA420)
? intra_quantizer_matrix : chroma_intra_quantizer_matrix;
/* ISO/IEC 13818-2 section 7.2.1: decode DC coefficients */
@@ -846,44 +856,44 @@ void CMPEG2Decoder::Decode_MPEG2_Intra_Block(int comp, int dc_dct_pred[])
}
sum = val << (3 - intra_dc_precision);
- bp[0] = (int16_t) sum;
+ bp[0] = (int16_t)sum;
/* decode AC coefficients */
- for (i=1; ; i++)
+ for (i = 1; ; i++)
{
code = Show_Bits(16);
if (code >= 16384)
{
if (intra_vlc_format)
- tab = &DCTtab0a[(code>>8)-4];
+ tab = &DCTtab0a[(code >> 8) - 4];
else
- tab = &DCTtabnext[(code>>12)-4];
+ tab = &DCTtabnext[(code >> 12) - 4];
}
else if (code >= 1024)
{
if (intra_vlc_format)
- tab = &DCTtab0a[(code>>8)-4];
+ tab = &DCTtab0a[(code >> 8) - 4];
else
- tab = &DCTtab0[(code>>8)-4];
+ tab = &DCTtab0[(code >> 8) - 4];
}
else if (code >= 512)
{
if (intra_vlc_format)
- tab = &DCTtab1a[(code>>6)-8];
+ tab = &DCTtab1a[(code >> 6) - 8];
else
- tab = &DCTtab1[(code>>6)-8];
+ tab = &DCTtab1[(code >> 6) - 8];
}
else if (code >= 256)
- tab = &DCTtab2[(code>>4)-16];
+ tab = &DCTtab2[(code >> 4) - 16];
else if (code >= 128)
- tab = &DCTtab3[(code>>3)-16];
+ tab = &DCTtab3[(code >> 3) - 16];
else if (code >= 64)
- tab = &DCTtab4[(code>>2)-16];
+ tab = &DCTtab4[(code >> 2) - 16];
else if (code >= 32)
- tab = &DCTtab5[(code>>1)-16];
+ tab = &DCTtab5[(code >> 1) - 16];
else if (code >= 16)
- tab = &DCTtab6[code-16];
+ tab = &DCTtab6[code - 16];
else
{
Fault_Flag = 1;
@@ -896,7 +906,7 @@ void CMPEG2Decoder::Decode_MPEG2_Intra_Block(int comp, int dc_dct_pred[])
if (val == 65)
{
// escape
- i+= Get_Bits(6);
+ i += Get_Bits(6);
val = Get_Bits(12);
if (!(val & 2047))
{
@@ -911,7 +921,7 @@ void CMPEG2Decoder::Decode_MPEG2_Intra_Block(int comp, int dc_dct_pred[])
{
if (val == 64)
break;
- i+= val;
+ i += val;
val = tab->level;
sign = Get_Bits(1);
}
@@ -926,7 +936,7 @@ void CMPEG2Decoder::Decode_MPEG2_Intra_Block(int comp, int dc_dct_pred[])
val = 2047 + sign; // saturation
if (sign)
val = -val;
- bp[j] = (int16_t) val;
+ bp[j] = (int16_t)val;
sum ^= val; // mismatch
}
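The intra DC path above left-shifts the decoded DC value by 3 - intra_dc_precision so that 8-, 9-, 10- and 11-bit DC precisions land on a common scale. A small hedged sketch (the helper name is illustrative, not from the source):

```cpp
#include <cstdint>

// DC reconstruction as in Decode_MPEG2_Intra_Block above; the size/differential
// VLC and the dc_dct_pred bookkeeping are omitted.
static int16_t scale_intra_dc(int dc_value, int intra_dc_precision) {
    // intra_dc_precision is 0..3 (8..11 bits)
    return static_cast<int16_t>(dc_value << (3 - intra_dc_precision));
}
// e.g. precision 0: 100 -> 800; precision 3: 100 -> 100
```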
@@ -938,41 +948,41 @@ void CMPEG2Decoder::Decode_MPEG2_Intra_Block(int comp, int dc_dct_pred[])
void CMPEG2Decoder::Decode_MPEG2_Non_Intra_Block(int comp)
{
int32_t code, val = 0, i, j, sign, sum;
- const DCTtab *tab;
- int16_t *bp;
- int *qmat;
+ const DCTtab* tab;
+ int16_t* bp;
+ int* qmat;
bp = block[comp];
- qmat = (comp<4 || chroma_format==CHROMA420)
+ qmat = (comp < 4 || chroma_format == CHROMA420)
? non_intra_quantizer_matrix : chroma_non_intra_quantizer_matrix;
/* decode AC coefficients */
sum = 0;
- for (i=0; ; i++)
+ for (i = 0; ; i++)
{
code = Show_Bits(16);
if (code >= 16384)
{
if (i)
- tab = &DCTtabnext[(code>>12)-4];
+ tab = &DCTtabnext[(code >> 12) - 4];
else
- tab = &DCTtabfirst[(code>>12)-4];
+ tab = &DCTtabfirst[(code >> 12) - 4];
}
else if (code >= 1024)
- tab = &DCTtab0[(code>>8)-4];
+ tab = &DCTtab0[(code >> 8) - 4];
else if (code >= 512)
- tab = &DCTtab1[(code>>6)-8];
+ tab = &DCTtab1[(code >> 6) - 8];
else if (code >= 256)
- tab = &DCTtab2[(code>>4)-16];
+ tab = &DCTtab2[(code >> 4) - 16];
else if (code >= 128)
- tab = &DCTtab3[(code>>3)-16];
+ tab = &DCTtab3[(code >> 3) - 16];
else if (code >= 64)
- tab = &DCTtab4[(code>>2)-16];
+ tab = &DCTtab4[(code >> 2) - 16];
else if (code >= 32)
- tab = &DCTtab5[(code>>1)-16];
+ tab = &DCTtab5[(code >> 1) - 16];
else if (code >= 16)
- tab = &DCTtab6[code-16];
+ tab = &DCTtab6[code - 16];
else
{
Fault_Flag = 1;
@@ -985,7 +995,7 @@ void CMPEG2Decoder::Decode_MPEG2_Non_Intra_Block(int comp)
if (val == 65)
{
// escape
- i+= Get_Bits(6);
+ i += Get_Bits(6);
val = Get_Bits(12);
if (!(val & 2047))
{
@@ -1000,7 +1010,7 @@ void CMPEG2Decoder::Decode_MPEG2_Non_Intra_Block(int comp)
{
if (val == 64)
break;
- i+= val;
+ i += val;
val = tab->level;
sign = Get_Bits(1);
}
@@ -1011,12 +1021,12 @@ void CMPEG2Decoder::Decode_MPEG2_Non_Intra_Block(int comp)
}
j = scan[alternate_scan][i];
- val = (((val<<1)+1) * quantizer_scale * qmat[j]) >> 5;
+ val = (((val << 1) + 1) * quantizer_scale * qmat[j]) >> 5;
if (val >= 2048)
val = 2047 + sign; // saturation
if (sign)
val = -val;
- bp[j] = (int16_t) val;
+ bp[j] = (int16_t)val;
sum ^= val; // mismatch
}
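The MPEG-2 non-intra path divides by 32 instead of 16, drops the oddification, and instead accumulates the parity of every reconstructed coefficient in `sum`. A hedged sketch; the final correction that flips the LSB of coefficient 63 is the standard ISO/IEC 13818-2 section 7.4.4 step and is assumed here, since the hunk above ends before it:

```cpp
#include <cstdint>

static int reconstruct_mpeg2_non_intra(int level, int sign, int quant, int weight) {
    int val = (((level << 1) + 1) * quant * weight) >> 5; // (2*level+1)*q*W / 32
    if (val >= 2048)
        val = 2047 + sign;   // saturation
    return sign ? -val : val;
}

static void mismatch_correct(int16_t block[64], int xor_sum) {
    if ((xor_sum & 1) == 0)  // even parity -> toggle the last coefficient
        block[63] ^= 1;
}
```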
@@ -1164,7 +1174,7 @@ int CMPEG2Decoder::Get_macroblock_address_increment()
if (code == 8)
{
/* macroblock_escape */
- val+= 33;
+ val += 33;
}
else if (code == 0)
{
@@ -1226,7 +1236,7 @@ int CMPEG2Decoder::Get_Luma_DC_dct_diff()
/* decode length */
code = Show_Bits(5);
- if (code<31)
+ if (code < 31)
{
size = DClumtab0[code].val;
Flush_Buffer(DClumtab0[code].len);
@@ -1238,14 +1248,14 @@ int CMPEG2Decoder::Get_Luma_DC_dct_diff()
Flush_Buffer(DClumtab1[code].len);
}
- if (size==0)
+ if (size == 0)
dct_diff = 0;
else
{
dct_diff = Get_Bits(size);
- if ((dct_diff & (1<<(size-1)))==0)
- dct_diff-= (1<<size) - 1;
+ if ((dct_diff & (1 << (size - 1))) == 0)
+ dct_diff -= (1 << size) - 1;
/* top field prediction */
form_prediction(forward_reference_frame, motion_vertical_field_select[0][0],
- current_frame, 0, Coded_Picture_Width<<1, Coded_Picture_Width<<1, 16, 8,
- bx, by>>1, PMV[0][0][0], PMV[0][0][1]>>1, stw);
+ current_frame, 0, Coded_Picture_Width << 1, Coded_Picture_Width << 1, 16, 8,
+ bx, by >> 1, PMV[0][0][0], PMV[0][0][1] >> 1, stw);
/* bottom field prediction */
form_prediction(forward_reference_frame, motion_vertical_field_select[1][0],
- current_frame, 1, Coded_Picture_Width<<1, Coded_Picture_Width<<1, 16, 8,
- bx, by>>1, PMV[1][0][0], PMV[1][0][1]>>1, stw);
+ current_frame, 1, Coded_Picture_Width << 1, Coded_Picture_Width << 1, 16, 8,
+ bx, by >> 1, PMV[1][0][0], PMV[1][0][1] >> 1, stw);
}
- else if (motion_type==MC_DMV) /* dual prime prediction */
+ else if (motion_type == MC_DMV) /* dual prime prediction */
{
/* calculate derived motion vectors */
- Dual_Prime_Arithmetic(DMV, dmvector, PMV[0][0][0], PMV[0][0][1]>>1);
+ Dual_Prime_Arithmetic(DMV, dmvector, PMV[0][0][0], PMV[0][0][1] >> 1);
/* predict top field from top field */
form_prediction(forward_reference_frame, 0, current_frame, 0,
- Coded_Picture_Width<<1, Coded_Picture_Width<<1, 16, 8, bx, by>>1,
- PMV[0][0][0], PMV[0][0][1]>>1, 0);
+ Coded_Picture_Width << 1, Coded_Picture_Width << 1, 16, 8, bx, by >> 1,
+ PMV[0][0][0], PMV[0][0][1] >> 1, 0);
/* predict and add to top field from bottom field */
form_prediction(forward_reference_frame, 1, current_frame, 0,
- Coded_Picture_Width<<1, Coded_Picture_Width<<1, 16, 8, bx, by>>1,
+ Coded_Picture_Width << 1, Coded_Picture_Width << 1, 16, 8, bx, by >> 1,
DMV[0][0], DMV[0][1], 1);
/* predict bottom field from bottom field */
form_prediction(forward_reference_frame, 1, current_frame, 1,
- Coded_Picture_Width<<1, Coded_Picture_Width<<1, 16, 8, bx, by>>1,
- PMV[0][0][0], PMV[0][0][1]>>1, 0);
+ Coded_Picture_Width << 1, Coded_Picture_Width << 1, 16, 8, bx, by >> 1,
+ PMV[0][0][0], PMV[0][0][1] >> 1, 0);
/* predict and add to bottom field from top field */
form_prediction(forward_reference_frame, 0, current_frame, 1,
- Coded_Picture_Width<<1, Coded_Picture_Width<<1, 16, 8, bx, by>>1,
+ Coded_Picture_Width << 1, Coded_Picture_Width << 1, 16, 8, bx, by >> 1,
DMV[1][0], DMV[1][1], 1);
}
}
else
{
/* field picture */
- currentfield = (picture_structure==BOTTOM_FIELD);
+ currentfield = (picture_structure == BOTTOM_FIELD);
/* determine which frame to use for prediction */
- if (picture_coding_type==P_TYPE && Second_Field && currentfield!=motion_vertical_field_select[0][0])
+ if (picture_coding_type == P_TYPE && Second_Field && currentfield != motion_vertical_field_select[0][0])
predframe = backward_reference_frame;
else
predframe = forward_reference_frame;
- if ((motion_type==MC_FIELD) || !(macroblock_type & MACROBLOCK_MOTION_FORWARD))
+ if ((motion_type == MC_FIELD) || !(macroblock_type & MACROBLOCK_MOTION_FORWARD))
{
form_prediction(predframe, motion_vertical_field_select[0][0], current_frame, 0,
- Coded_Picture_Width<<1, Coded_Picture_Width<<1, 16, 16, bx, by,
+ Coded_Picture_Width << 1, Coded_Picture_Width << 1, 16, 16, bx, by,
PMV[0][0][0], PMV[0][0][1], stw);
}
- else if (motion_type==MC_16X8)
+ else if (motion_type == MC_16X8)
{
form_prediction(predframe, motion_vertical_field_select[0][0], current_frame, 0,
- Coded_Picture_Width<<1, Coded_Picture_Width<<1, 16, 8, bx, by,
+ Coded_Picture_Width << 1, Coded_Picture_Width << 1, 16, 8, bx, by,
PMV[0][0][0], PMV[0][0][1], stw);
- if (picture_coding_type==P_TYPE && Second_Field && currentfield!=motion_vertical_field_select[1][0])
+ if (picture_coding_type == P_TYPE && Second_Field && currentfield != motion_vertical_field_select[1][0])
predframe = backward_reference_frame;
else
predframe = forward_reference_frame;
form_prediction(predframe, motion_vertical_field_select[1][0], current_frame,
- 0, Coded_Picture_Width<<1, Coded_Picture_Width<<1, 16, 8, bx, by+8,
+ 0, Coded_Picture_Width << 1, Coded_Picture_Width << 1, 16, 8, bx, by + 8,
PMV[1][0][0], PMV[1][0][1], stw);
}
- else if (motion_type==MC_DMV)
+ else if (motion_type == MC_DMV)
{
if (Second_Field)
predframe = backward_reference_frame;
@@ -1399,12 +1409,12 @@ void CMPEG2Decoder::form_predictions(int bx, int by, int macroblock_type, int mo
/* predict from field of same parity */
form_prediction(forward_reference_frame, currentfield, current_frame, 0,
- Coded_Picture_Width<<1, Coded_Picture_Width<<1, 16, 16, bx, by,
+ Coded_Picture_Width << 1, Coded_Picture_Width << 1, 16, 16, bx, by,
PMV[0][0][0], PMV[0][0][1], 0);
/* predict from field of opposite parity */
form_prediction(predframe, !currentfield, current_frame, 0,
- Coded_Picture_Width<<1, Coded_Picture_Width<<1, 16, 16, bx, by,
+ Coded_Picture_Width << 1, Coded_Picture_Width << 1, 16, 16, bx, by,
DMV[0][0], DMV[0][1], 1);
}
}
@@ -1414,108 +1424,108 @@ void CMPEG2Decoder::form_predictions(int bx, int by, int macroblock_type, int mo
if (macroblock_type & MACROBLOCK_MOTION_BACKWARD)
{
- if (picture_structure==FRAME_PICTURE)
+ if (picture_structure == FRAME_PICTURE)
{
- if (motion_type==MC_FRAME)
+ if (motion_type == MC_FRAME)
{
/* frame-based prediction */
form_prediction(backward_reference_frame, 0, current_frame, 0,
- Coded_Picture_Width, Coded_Picture_Width<<1, 16, 8, bx, by,
+ Coded_Picture_Width, Coded_Picture_Width << 1, 16, 8, bx, by,
PMV[0][1][0], PMV[0][1][1], stw);
form_prediction(backward_reference_frame, 1, current_frame, 1,
- Coded_Picture_Width, Coded_Picture_Width<<1, 16, 8, bx, by,
+ Coded_Picture_Width, Coded_Picture_Width << 1, 16, 8, bx, by,
PMV[0][1][0], PMV[0][1][1], stw);
}
else /* field-based prediction */
{
/* top field prediction */
form_prediction(backward_reference_frame, motion_vertical_field_select[0][1],
- current_frame, 0, Coded_Picture_Width<<1, Coded_Picture_Width<<1, 16, 8,
- bx, by>>1, PMV[0][1][0], PMV[0][1][1]>>1, stw);
+ current_frame, 0, Coded_Picture_Width << 1, Coded_Picture_Width << 1, 16, 8,
+ bx, by >> 1, PMV[0][1][0], PMV[0][1][1] >> 1, stw);
/* bottom field prediction */
form_prediction(backward_reference_frame, motion_vertical_field_select[1][1],
- current_frame, 1, Coded_Picture_Width<<1, Coded_Picture_Width<<1, 16, 8,
- bx, by>>1, PMV[1][1][0], PMV[1][1][1]>>1, stw);
+ current_frame, 1, Coded_Picture_Width << 1, Coded_Picture_Width << 1, 16, 8,
+ bx, by >> 1, PMV[1][1][0], PMV[1][1][1] >> 1, stw);
}
}
else
{
/* field picture */
- if (motion_type==MC_FIELD)
+ if (motion_type == MC_FIELD)
{
/* field-based prediction */
form_prediction(backward_reference_frame, motion_vertical_field_select[0][1],
- current_frame, 0, Coded_Picture_Width<<1, Coded_Picture_Width<<1, 16, 16,
+ current_frame, 0, Coded_Picture_Width << 1, Coded_Picture_Width << 1, 16, 16,
bx, by, PMV[0][1][0], PMV[0][1][1], stw);
}
- else if (motion_type==MC_16X8)
+ else if (motion_type == MC_16X8)
{
form_prediction(backward_reference_frame, motion_vertical_field_select[0][1],
- current_frame, 0, Coded_Picture_Width<<1, Coded_Picture_Width<<1, 16, 8,
+ current_frame, 0, Coded_Picture_Width << 1, Coded_Picture_Width << 1, 16, 8,
bx, by, PMV[0][1][0], PMV[0][1][1], stw);
form_prediction(backward_reference_frame, motion_vertical_field_select[1][1],
- current_frame, 0, Coded_Picture_Width<<1, Coded_Picture_Width<<1, 16, 8,
- bx, by+8, PMV[1][1][0], PMV[1][1][1], stw);
+ current_frame, 0, Coded_Picture_Width << 1, Coded_Picture_Width << 1, 16, 8,
+ bx, by + 8, PMV[1][1][0], PMV[1][1][1], stw);
}
}
}
}
// Minor rewrite to use the function pointer array - Vlad59 04-20-2002
-void CMPEG2Decoder::form_prediction(uint8_t *src[], int sfield, uint8_t *dst[],
- int dfield, int lx, int lx2, int w, int h, int x, int y,
- int dx, int dy, int average_flag)
+void CMPEG2Decoder::form_prediction(uint8_t* src[], int sfield, uint8_t* dst[],
+ int dfield, int lx, int lx2, int w, int h, int x, int y,
+ int dx, int dy, int average_flag)
{
- if ( y+(dy>>1) < 0 )
+ if (y + (dy >> 1) < 0)
{
return;
}
- uint8_t *s = (src[0]+(sfield?lx2>>1:0)) + lx * (static_cast(y) + (dy>>1)) + x + (dx>>1);
- uint8_t *d = (dst[0]+(dfield?lx2>>1:0)) + lx * static_cast(y) + x;
- int flag = ((dx & 1)<<1) + (dy & 1);
+ uint8_t* s = (src[0] + (sfield ? lx2 >> 1 : 0)) + lx * (static_cast(y) + (dy >> 1)) + x + (dx >> 1);
+ uint8_t* d = (dst[0] + (dfield ? lx2 >> 1 : 0)) + lx * static_cast(y) + x;
+ int flag = ((dx & 1) << 1) + (dy & 1);
- ppppf_motion[average_flag][w>>4][flag] (d, s, lx2, lx, h);
+ ppppf_motion[average_flag][w >> 4][flag](d, s, lx2, lx, h);
- if (chroma_format!=CHROMA444)
+ if (chroma_format != CHROMA444)
{
- lx>>=1; lx2>>=1; w>>=1; x>>=1; dx/=2;
+ lx >>= 1; lx2 >>= 1; w >>= 1; x >>= 1; dx /= 2;
}
- if (chroma_format==CHROMA420)
+ if (chroma_format == CHROMA420)
{
- h>>=1; y>>=1; dy/=2;
+ h >>= 1; y >>= 1; dy /= 2;
}
/* Cb */
- s = (src[1]+(sfield?lx2>>1:0)) + lx * (static_cast(y) + (dy>>1)) + x + (dx>>1);
- d = (dst[1]+(dfield?lx2>>1:0)) + lx * static_cast(y) + x;
- flag = ((dx & 1)<<1) + (dy & 1);
- ppppf_motion[average_flag][w>>4][flag] (d, s, lx2, lx, h);
+ s = (src[1] + (sfield ? lx2 >> 1 : 0)) + lx * (static_cast(y) + (dy >> 1)) + x + (dx >> 1);
+ d = (dst[1] + (dfield ? lx2 >> 1 : 0)) + lx * static_cast(y) + x;
+ flag = ((dx & 1) << 1) + (dy & 1);
+ ppppf_motion[average_flag][w >> 4][flag](d, s, lx2, lx, h);
/* Cr */
- s = (src[2]+(sfield?lx2>>1:0)) + lx * (static_cast(y) + (dy>>1)) + x + (dx>>1);
- d = (dst[2]+(dfield?lx2>>1:0)) + lx * static_cast(y) + x;
- ppppf_motion[average_flag][w>>4][flag] (d, s, lx2, lx, h);
+ s = (src[2] + (sfield ? lx2 >> 1 : 0)) + lx * (static_cast(y) + (dy >> 1)) + x + (dx >> 1);
+ d = (dst[2] + (dfield ? lx2 >> 1 : 0)) + lx * static_cast(y) + x;
+ ppppf_motion[average_flag][w >> 4][flag](d, s, lx2, lx, h);
}
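form_prediction above packs the half-pel fractions of the motion vector into a 2-bit selector and uses the integer parts as the source offset; the selector indexes the ppppf_motion kernel array. A hedged restatement of just that indexing (the helper name is illustrative):

```cpp
// 0 = full-pel, 1 = vertical half-pel, 2 = horizontal half-pel, 3 = both.
static inline int halfpel_flag(int dx, int dy) {
    return ((dx & 1) << 1) + (dy & 1);
}
// The integer halves of dx/dy become the source address offset, as above:
//   s = base + lx * (y + (dy >> 1)) + x + (dx >> 1);
```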
/* ISO/IEC 13818-2 sections 6.2.5.2, 6.3.17.2, and 7.6.3: Motion vectors */
-void CMPEG2Decoder::motion_vectors(int PMV[2][2][2],int dmvector[2],
+void CMPEG2Decoder::motion_vectors(int PMV[2][2][2], int dmvector[2],
int motion_vertical_field_select[2][2], int s,
int motion_vector_count, int mv_format, int h_r_size,
int v_r_size, int dmv, int mvscale)
{
- if (motion_vector_count==1)
+ if (motion_vector_count == 1)
{
- if (mv_format==MV_FIELD && !dmv)
+ if (mv_format == MV_FIELD && !dmv)
motion_vertical_field_select[1][s] =
motion_vertical_field_select[0][s] = Get_Bits(1);
- motion_vector(PMV[0][s],dmvector,h_r_size,v_r_size,dmv,mvscale,0);
+ motion_vector(PMV[0][s], dmvector, h_r_size, v_r_size, dmv, mvscale, 0);
/* update other motion vector predictors */
PMV[1][s][0] = PMV[0][s][0];
@@ -1524,47 +1534,47 @@ void CMPEG2Decoder::motion_vectors(int PMV[2][2][2],int dmvector[2],
else
{
motion_vertical_field_select[0][s] = Get_Bits(1);
- motion_vector(PMV[0][s],dmvector,h_r_size,v_r_size,dmv,mvscale,0);
+ motion_vector(PMV[0][s], dmvector, h_r_size, v_r_size, dmv, mvscale, 0);
motion_vertical_field_select[1][s] = Get_Bits(1);
- motion_vector(PMV[1][s],dmvector,h_r_size,v_r_size,dmv,mvscale,0);
+ motion_vector(PMV[1][s], dmvector, h_r_size, v_r_size, dmv, mvscale, 0);
}
}
/* ISO/IEC 13818-2 section 7.6.3.6: Dual prime additional arithmetic */
-void CMPEG2Decoder::Dual_Prime_Arithmetic(int DMV[][2],int *dmvector, int mvx,int mvy)
+void CMPEG2Decoder::Dual_Prime_Arithmetic(int DMV[][2], int* dmvector, int mvx, int mvy)
{
- if (picture_structure==FRAME_PICTURE)
+ if (picture_structure == FRAME_PICTURE)
{
if (top_field_first)
{
/* vector for prediction of top field from bottom field */
- DMV[0][0] = ((mvx +(mvx>0))>>1) + dmvector[0];
- DMV[0][1] = ((mvy +(mvy>0))>>1) + dmvector[1] - 1;
+ DMV[0][0] = ((mvx + (mvx > 0)) >> 1) + dmvector[0];
+ DMV[0][1] = ((mvy + (mvy > 0)) >> 1) + dmvector[1] - 1;
/* vector for prediction of bottom field from top field */
- DMV[1][0] = ((3*mvx+(mvx>0))>>1) + dmvector[0];
- DMV[1][1] = ((3*mvy+(mvy>0))>>1) + dmvector[1] + 1;
+ DMV[1][0] = ((3 * mvx + (mvx > 0)) >> 1) + dmvector[0];
+ DMV[1][1] = ((3 * mvy + (mvy > 0)) >> 1) + dmvector[1] + 1;
}
else
{
/* vector for prediction of top field from bottom field */
- DMV[0][0] = ((3*mvx+(mvx>0))>>1) + dmvector[0];
- DMV[0][1] = ((3*mvy+(mvy>0))>>1) + dmvector[1] - 1;
+ DMV[0][0] = ((3 * mvx + (mvx > 0)) >> 1) + dmvector[0];
+ DMV[0][1] = ((3 * mvy + (mvy > 0)) >> 1) + dmvector[1] - 1;
/* vector for prediction of bottom field from top field */
- DMV[1][0] = ((mvx +(mvx>0))>>1) + dmvector[0];
- DMV[1][1] = ((mvy +(mvy>0))>>1) + dmvector[1] + 1;
+ DMV[1][0] = ((mvx + (mvx > 0)) >> 1) + dmvector[0];
+ DMV[1][1] = ((mvy + (mvy > 0)) >> 1) + dmvector[1] + 1;
}
}
else
{
/* vector for prediction from field of opposite 'parity' */
- DMV[0][0] = ((mvx+(mvx>0))>>1) + dmvector[0];
- DMV[0][1] = ((mvy+(mvy>0))>>1) + dmvector[1];
+ DMV[0][0] = ((mvx + (mvx > 0)) >> 1) + dmvector[0];
+ DMV[0][1] = ((mvy + (mvy > 0)) >> 1) + dmvector[1];
/* correct for vertical field shift */
- if (picture_structure==TOP_FIELD)
+ if (picture_structure == TOP_FIELD)
DMV[0][1]--;
else
DMV[0][1]++;
@@ -1572,7 +1582,7 @@ void CMPEG2Decoder::Dual_Prime_Arithmetic(int DMV[][2],int *dmvector, int mvx,in
}
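As a worked example of the frame-picture, top_field_first branch of Dual_Prime_Arithmetic above (the input values are made up):

```cpp
#include <cstdio>

int main() {
    int mvx = 4, mvy = 2;          // transmitted vector (half-pel units)
    int dmvector[2] = { 1, -1 };   // transmitted differential
    int DMV[2][2];
    // top field predicted from bottom field
    DMV[0][0] = ((mvx + (mvx > 0)) >> 1) + dmvector[0];          // (4+1)>>1 + 1 =  3
    DMV[0][1] = ((mvy + (mvy > 0)) >> 1) + dmvector[1] - 1;      // (2+1)>>1 - 2 = -1
    // bottom field predicted from top field
    DMV[1][0] = ((3 * mvx + (mvx > 0)) >> 1) + dmvector[0];      // 13>>1 + 1    =  7
    DMV[1][1] = ((3 * mvy + (mvy > 0)) >> 1) + dmvector[1] + 1;  // 7>>1 - 1 + 1 =  3
    std::printf("%d %d / %d %d\n", DMV[0][0], DMV[0][1], DMV[1][0], DMV[1][1]);
}
```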
/* get and decode motion vector and differential motion vector for one prediction */
-void CMPEG2Decoder::motion_vector(int *PMV, int *dmvector, int h_r_size, int v_r_size,
+void CMPEG2Decoder::motion_vector(int* PMV, int* dmvector, int h_r_size, int v_r_size,
int dmv, int mvscale, int full_pel_vector)
{
int motion_code, motion_residual;
@@ -1581,21 +1591,21 @@ void CMPEG2Decoder::motion_vector(int *PMV, int *dmvector, int h_r_size, int v_r
/* ISO/IEC 13818-2 Table B-10 */
motion_code = Get_motion_code();
- motion_residual = (h_r_size!=0 && motion_code!=0) ? Get_Bits(h_r_size) : 0;
+ motion_residual = (h_r_size != 0 && motion_code != 0) ? Get_Bits(h_r_size) : 0;
- decode_motion_vector(&PMV[0],h_r_size,motion_code,motion_residual,full_pel_vector);
+ decode_motion_vector(&PMV[0], h_r_size, motion_code, motion_residual, full_pel_vector);
if (dmv)
dmvector[0] = Get_dmvector();
/* vertical component */
- motion_code = Get_motion_code();
- motion_residual = (v_r_size!=0 && motion_code!=0) ? Get_Bits(v_r_size) : 0;
+ motion_code = Get_motion_code();
+ motion_residual = (v_r_size != 0 && motion_code != 0) ? Get_Bits(v_r_size) : 0;
if (mvscale)
PMV[1] >>= 1; /* DIV 2 */
- decode_motion_vector(&PMV[1],v_r_size,motion_code,motion_residual,full_pel_vector);
+ decode_motion_vector(&PMV[1], v_r_size, motion_code, motion_residual, full_pel_vector);
if (mvscale)
PMV[1] <<= 1;
@@ -1608,28 +1618,28 @@ void CMPEG2Decoder::motion_vector(int *PMV, int *dmvector, int h_r_size, int v_r
/* ISO/IEC 13818-2 section 7.6.3.1: Decoding the motion vectors */
/* Note: the arithmetic here is more elegant than that which is shown
in 7.6.3.1. The end results (PMV[][][]) should, however, be the same. */
-void CMPEG2Decoder::decode_motion_vector(int *pred, int r_size, int motion_code,
+void CMPEG2Decoder::decode_motion_vector(int* pred, int r_size, int motion_code,
int motion_residual, int full_pel_vector)
{
int lim, vec;
- lim = 16<<r_size;
+ lim = 16 << r_size;
vec = full_pel_vector ? (*pred >> 1) : (*pred);
- if (motion_code>0)
+ if (motion_code > 0)
{
- vec+= ((motion_code-1)<<r_size) + motion_residual + 1;
- if (vec>=lim)
- vec-= lim + lim;
+ vec += ((motion_code - 1) << r_size) + motion_residual + 1;
+ if (vec >= lim)
+ vec -= lim + lim;
}
- else if (motion_code<0)
+ else if (motion_code < 0)
{
- vec-= ((-motion_code-1)<<r_size) + motion_residual + 1;
- if (vec<-lim)
- vec+= lim + lim;
+ vec -= ((-motion_code - 1) << r_size) + motion_residual + 1;
+ if (vec < -lim)
+ vec += lim + lim;
diff --git a/src/global.h b/src/global.h
--- a/src/global.h
+++ b/src/global.h
-//#include "misc.h"
+ //#include "misc.h"
#ifdef GLOBAL
@@ -41,11 +41,11 @@
#endif
enum {
- MACROBLOCK_INTRA = 1,
- MACROBLOCK_PATTERN = 2,
+ MACROBLOCK_INTRA = 1,
+ MACROBLOCK_PATTERN = 2,
MACROBLOCK_MOTION_BACKWARD = 4,
- MACROBLOCK_MOTION_FORWARD = 8,
- MACROBLOCK_QUANT = 16,
+ MACROBLOCK_MOTION_FORWARD = 8,
+ MACROBLOCK_QUANT = 16,
};
@@ -505,7 +505,7 @@ XTN DCTtab DCTtab6[16]
#endif
;
// add extra table of table ptrs for performance - trbarry 5/2003
-XTN DCTtab *pDCTtabNonI[28] // ptr to non_intra tables
+XTN DCTtab* pDCTtabNonI[28] // ptr to non_intra tables
#ifdef GLOBAL
=
{
@@ -541,7 +541,7 @@ XTN DCTtab *pDCTtabNonI[28] // ptr to non_intra tables
#endif
;
// same as above but for when intra_vlc_format - trbarry 5/2003
-XTN DCTtab *pDCTtab_intra[28] // ptr to non_intra tables
+XTN DCTtab* pDCTtab_intra[28] // ptr to non_intra tables
#ifdef GLOBAL
=
{
@@ -622,10 +622,10 @@ XTN VLCtab PMBtab0[8]
{ERROR_VALUE,0},
{MACROBLOCK_MOTION_FORWARD,3},
{MACROBLOCK_PATTERN,2}, {MACROBLOCK_PATTERN,2},
- {MACROBLOCK_MOTION_FORWARD|MACROBLOCK_PATTERN,1},
- {MACROBLOCK_MOTION_FORWARD|MACROBLOCK_PATTERN,1},
- {MACROBLOCK_MOTION_FORWARD|MACROBLOCK_PATTERN,1},
- {MACROBLOCK_MOTION_FORWARD|MACROBLOCK_PATTERN,1}
+ {MACROBLOCK_MOTION_FORWARD | MACROBLOCK_PATTERN,1},
+ {MACROBLOCK_MOTION_FORWARD | MACROBLOCK_PATTERN,1},
+ {MACROBLOCK_MOTION_FORWARD | MACROBLOCK_PATTERN,1},
+ {MACROBLOCK_MOTION_FORWARD | MACROBLOCK_PATTERN,1}
}
#endif
;
@@ -636,9 +636,9 @@ XTN VLCtab PMBtab1[8]
=
{
{ERROR_VALUE,0},
- {MACROBLOCK_QUANT|MACROBLOCK_INTRA,6},
- {MACROBLOCK_QUANT|MACROBLOCK_PATTERN,5}, {MACROBLOCK_QUANT|MACROBLOCK_PATTERN,5},
- {MACROBLOCK_QUANT|MACROBLOCK_MOTION_FORWARD|MACROBLOCK_PATTERN,5}, {MACROBLOCK_QUANT|MACROBLOCK_MOTION_FORWARD|MACROBLOCK_PATTERN,5},
+ {MACROBLOCK_QUANT | MACROBLOCK_INTRA,6},
+ {MACROBLOCK_QUANT | MACROBLOCK_PATTERN,5}, {MACROBLOCK_QUANT | MACROBLOCK_PATTERN,5},
+ {MACROBLOCK_QUANT | MACROBLOCK_MOTION_FORWARD | MACROBLOCK_PATTERN,5}, {MACROBLOCK_QUANT | MACROBLOCK_MOTION_FORWARD | MACROBLOCK_PATTERN,5},
{MACROBLOCK_INTRA,5}, {MACROBLOCK_INTRA,5}
}
#endif
@@ -652,19 +652,19 @@ XTN VLCtab BMBtab0[16]
{ERROR_VALUE,0},
{ERROR_VALUE,0},
{MACROBLOCK_MOTION_FORWARD,4},
- {MACROBLOCK_MOTION_FORWARD|MACROBLOCK_PATTERN,4},
+ {MACROBLOCK_MOTION_FORWARD | MACROBLOCK_PATTERN,4},
{MACROBLOCK_MOTION_BACKWARD,3},
{MACROBLOCK_MOTION_BACKWARD,3},
- {MACROBLOCK_MOTION_BACKWARD|MACROBLOCK_PATTERN,3},
- {MACROBLOCK_MOTION_BACKWARD|MACROBLOCK_PATTERN,3},
- {MACROBLOCK_MOTION_FORWARD|MACROBLOCK_MOTION_BACKWARD,2},
- {MACROBLOCK_MOTION_FORWARD|MACROBLOCK_MOTION_BACKWARD,2},
- {MACROBLOCK_MOTION_FORWARD|MACROBLOCK_MOTION_BACKWARD,2},
- {MACROBLOCK_MOTION_FORWARD|MACROBLOCK_MOTION_BACKWARD,2},
- {MACROBLOCK_MOTION_FORWARD|MACROBLOCK_MOTION_BACKWARD|MACROBLOCK_PATTERN,2},
- {MACROBLOCK_MOTION_FORWARD|MACROBLOCK_MOTION_BACKWARD|MACROBLOCK_PATTERN,2},
- {MACROBLOCK_MOTION_FORWARD|MACROBLOCK_MOTION_BACKWARD|MACROBLOCK_PATTERN,2},
- {MACROBLOCK_MOTION_FORWARD|MACROBLOCK_MOTION_BACKWARD|MACROBLOCK_PATTERN,2}
+ {MACROBLOCK_MOTION_BACKWARD | MACROBLOCK_PATTERN,3},
+ {MACROBLOCK_MOTION_BACKWARD | MACROBLOCK_PATTERN,3},
+ {MACROBLOCK_MOTION_FORWARD | MACROBLOCK_MOTION_BACKWARD,2},
+ {MACROBLOCK_MOTION_FORWARD | MACROBLOCK_MOTION_BACKWARD,2},
+ {MACROBLOCK_MOTION_FORWARD | MACROBLOCK_MOTION_BACKWARD,2},
+ {MACROBLOCK_MOTION_FORWARD | MACROBLOCK_MOTION_BACKWARD,2},
+ {MACROBLOCK_MOTION_FORWARD | MACROBLOCK_MOTION_BACKWARD | MACROBLOCK_PATTERN,2},
+ {MACROBLOCK_MOTION_FORWARD | MACROBLOCK_MOTION_BACKWARD | MACROBLOCK_PATTERN,2},
+ {MACROBLOCK_MOTION_FORWARD | MACROBLOCK_MOTION_BACKWARD | MACROBLOCK_PATTERN,2},
+ {MACROBLOCK_MOTION_FORWARD | MACROBLOCK_MOTION_BACKWARD | MACROBLOCK_PATTERN,2}
}
#endif
;
@@ -675,11 +675,11 @@ XTN VLCtab BMBtab1[8]
=
{
{ERROR_VALUE,0},
- {MACROBLOCK_QUANT|MACROBLOCK_INTRA,6},
- {MACROBLOCK_QUANT|MACROBLOCK_MOTION_BACKWARD|MACROBLOCK_PATTERN,6},
- {MACROBLOCK_QUANT|MACROBLOCK_MOTION_FORWARD|MACROBLOCK_PATTERN,6},
- {MACROBLOCK_QUANT|MACROBLOCK_MOTION_FORWARD|MACROBLOCK_MOTION_BACKWARD|MACROBLOCK_PATTERN,5},
- {MACROBLOCK_QUANT|MACROBLOCK_MOTION_FORWARD|MACROBLOCK_MOTION_BACKWARD|MACROBLOCK_PATTERN,5},
+ {MACROBLOCK_QUANT | MACROBLOCK_INTRA,6},
+ {MACROBLOCK_QUANT | MACROBLOCK_MOTION_BACKWARD | MACROBLOCK_PATTERN,6},
+ {MACROBLOCK_QUANT | MACROBLOCK_MOTION_FORWARD | MACROBLOCK_PATTERN,6},
+ {MACROBLOCK_QUANT | MACROBLOCK_MOTION_FORWARD | MACROBLOCK_MOTION_BACKWARD | MACROBLOCK_PATTERN,5},
+ {MACROBLOCK_QUANT | MACROBLOCK_MOTION_FORWARD | MACROBLOCK_MOTION_BACKWARD | MACROBLOCK_PATTERN,5},
{MACROBLOCK_INTRA,5},
{MACROBLOCK_INTRA,5}
}
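The PMBtab/BMBtab entries above store macroblock_type as an OR of the flags declared earlier in global.h plus a VLC length; downstream code just tests bits. A minimal hedged sketch of such tests (the enum values are from the header, the helper names are not):

```cpp
enum {
    MACROBLOCK_INTRA = 1,
    MACROBLOCK_PATTERN = 2,
    MACROBLOCK_MOTION_BACKWARD = 4,
    MACROBLOCK_MOTION_FORWARD = 8,
    MACROBLOCK_QUANT = 16,
};

static bool needs_forward_mv(int macroblock_type) {
    return (macroblock_type & MACROBLOCK_MOTION_FORWARD) != 0;
}
static bool carries_quantizer(int macroblock_type) {
    return (macroblock_type & MACROBLOCK_QUANT) != 0;
}
```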
diff --git a/src/idct_ap922_sse2.cpp b/src/idct_ap922_sse2.cpp
index b14ae85..5b1c9e4 100644
--- a/src/idct_ap922_sse2.cpp
+++ b/src/idct_ap922_sse2.cpp
@@ -250,8 +250,8 @@ idct_colx8_sse2(int16_t* block) noexcept
void idct_ap922_sse2(int16_t* block)
{
- idct_row_sse2(block + 0, table04, rounders[0]);
- idct_row_sse2(block + 8, table17, rounders[1]);
+ idct_row_sse2(block + 0, table04, rounders[0]);
+ idct_row_sse2(block + 8, table17, rounders[1]);
idct_row_sse2(block + 16, table26, rounders[2]);
idct_row_sse2(block + 24, table35, rounders[3]);
idct_row_sse2(block + 32, table04, rounders[4]);
diff --git a/src/idct_llm_float_avx2.cpp b/src/idct_llm_float_avx2.cpp
index c630d4f..5f3918b 100644
--- a/src/idct_llm_float_avx2.cpp
+++ b/src/idct_llm_float_avx2.cpp
@@ -84,8 +84,8 @@ idct_8x8_fma3(__m256& s0, __m256& s1, __m256& s2, __m256& s3, __m256& s4, __m256
z0 = _mm256_add_ps(s0, s4);
z1 = _mm256_sub_ps(s0, s4);
- z2=_mm256_fmadd_ps(s6, _mm256_load_ps(llm_coefs + 80), z4);
- z3=_mm256_fmadd_ps(s2, _mm256_load_ps(llm_coefs + 88), z4);
+ z2 = _mm256_fmadd_ps(s6, _mm256_load_ps(llm_coefs + 80), z4);
+ z3 = _mm256_fmadd_ps(s2, _mm256_load_ps(llm_coefs + 88), z4);
__m256 a0 = _mm256_add_ps(z0, z3);
__m256 a3 = _mm256_sub_ps(z0, z3);
@@ -138,7 +138,7 @@ void idct_llm_float_avx2(int16_t* block)
idct_8x8_fma3(s0, s1, s2, s3, s4, s5, s6, s7);
- float_to_dst_avx2(s0, s1, block + 0);
+ float_to_dst_avx2(s0, s1, block + 0);
float_to_dst_avx2(s2, s3, block + 16);
float_to_dst_avx2(s4, s5, block + 32);
float_to_dst_avx2(s6, s7, block + 48);
diff --git a/src/idct_llm_float_sse2.cpp b/src/idct_llm_float_sse2.cpp
index e38fa5c..3a61e17 100644
--- a/src/idct_llm_float_sse2.cpp
+++ b/src/idct_llm_float_sse2.cpp
@@ -18,7 +18,7 @@ alignas(64) static const float llm_coefs[] = {
};
-static inline void short_to_float(const short* srcp, float*dstp) noexcept
+static inline void short_to_float(const short* srcp, float* dstp) noexcept
{
const __m128i zero = _mm_setzero_si128();
@@ -52,31 +52,31 @@ static inline void idct_8x4_with_transpose(const float* srcp, float* dstp) noexc
__m128 z3 = _mm_add_ps(s1, s5);
__m128 z4 = _mm_mul_ps(_mm_add_ps(z0, z1), _mm_load_ps(llm_coefs));
- z2 =_mm_add_ps(_mm_mul_ps(z2, _mm_load_ps(llm_coefs + 4)), z4);
- z3 =_mm_add_ps(_mm_mul_ps(z3, _mm_load_ps(llm_coefs + 8)), z4);
- z0 =_mm_mul_ps(z0, _mm_load_ps(llm_coefs + 12));
- z1 =_mm_mul_ps(z1, _mm_load_ps(llm_coefs + 16));
+ z2 = _mm_add_ps(_mm_mul_ps(z2, _mm_load_ps(llm_coefs + 4)), z4);
+ z3 = _mm_add_ps(_mm_mul_ps(z3, _mm_load_ps(llm_coefs + 8)), z4);
+ z0 = _mm_mul_ps(z0, _mm_load_ps(llm_coefs + 12));
+ z1 = _mm_mul_ps(z1, _mm_load_ps(llm_coefs + 16));
- __m128 b3 =_mm_add_ps(_mm_add_ps(_mm_mul_ps(s7, _mm_load_ps(llm_coefs + 20)), z0), z2);
- __m128 b2 =_mm_add_ps(_mm_add_ps(_mm_mul_ps(s5, _mm_load_ps(llm_coefs + 24)), z1), z3);
- __m128 b1 =_mm_add_ps(_mm_add_ps(_mm_mul_ps(s3, _mm_load_ps(llm_coefs + 28)), z1), z2);
- __m128 b0 =_mm_add_ps(_mm_add_ps(_mm_mul_ps(s1, _mm_load_ps(llm_coefs + 32)), z0), z3);
+ __m128 b3 = _mm_add_ps(_mm_add_ps(_mm_mul_ps(s7, _mm_load_ps(llm_coefs + 20)), z0), z2);
+ __m128 b2 = _mm_add_ps(_mm_add_ps(_mm_mul_ps(s5, _mm_load_ps(llm_coefs + 24)), z1), z3);
+ __m128 b1 = _mm_add_ps(_mm_add_ps(_mm_mul_ps(s3, _mm_load_ps(llm_coefs + 28)), z1), z2);
+ __m128 b0 = _mm_add_ps(_mm_add_ps(_mm_mul_ps(s1, _mm_load_ps(llm_coefs + 32)), z0), z3);
z4 = _mm_mul_ps(_mm_add_ps(s2, s6), _mm_load_ps(llm_coefs + 36));
- z0=_mm_add_ps(s0, s4);
- z1=_mm_sub_ps(s0, s4);
+ z0 = _mm_add_ps(s0, s4);
+ z1 = _mm_sub_ps(s0, s4);
- z2=_mm_add_ps(z4, _mm_mul_ps(s6, _mm_load_ps(llm_coefs + 40)));
- z3=_mm_add_ps(z4, _mm_mul_ps(s2, _mm_load_ps(llm_coefs + 44)));
+ z2 = _mm_add_ps(z4, _mm_mul_ps(s6, _mm_load_ps(llm_coefs + 40)));
+ z3 = _mm_add_ps(z4, _mm_mul_ps(s2, _mm_load_ps(llm_coefs + 44)));
s0 = _mm_add_ps(z0, z3);
s3 = _mm_sub_ps(z0, z3);
s1 = _mm_add_ps(z1, z2);
s2 = _mm_sub_ps(z1, z2);
- _mm_store_ps(dstp , _mm_add_ps(s0, b0));
+ _mm_store_ps(dstp, _mm_add_ps(s0, b0));
_mm_store_ps(dstp + 56, _mm_sub_ps(s0, b0));
- _mm_store_ps(dstp + 8, _mm_add_ps(s1, b1));
+ _mm_store_ps(dstp + 8, _mm_add_ps(s1, b1));
_mm_store_ps(dstp + 48, _mm_sub_ps(s1, b1));
_mm_store_ps(dstp + 16, _mm_add_ps(s2, b2));
_mm_store_ps(dstp + 40, _mm_sub_ps(s2, b2));
diff --git a/src/idct_ref_sse3.cpp b/src/idct_ref_sse3.cpp
index fa9fdc0..c6bd9a8 100644
--- a/src/idct_ref_sse3.cpp
+++ b/src/idct_ref_sse3.cpp
@@ -184,8 +184,8 @@ void idct_ref_sse3(int16_t* block)
void prefetch_ref()
{
- _mm_prefetch(reinterpret_cast(ref_dct_matrix_t + 0), _MM_HINT_NTA);
- _mm_prefetch(reinterpret_cast(ref_dct_matrix_t + 8), _MM_HINT_NTA);
+ _mm_prefetch(reinterpret_cast(ref_dct_matrix_t + 0), _MM_HINT_NTA);
+ _mm_prefetch(reinterpret_cast(ref_dct_matrix_t + 8), _MM_HINT_NTA);
_mm_prefetch(reinterpret_cast(ref_dct_matrix_t + 16), _MM_HINT_NTA);
_mm_prefetch(reinterpret_cast(ref_dct_matrix_t + 24), _MM_HINT_NTA);
_mm_prefetch(reinterpret_cast(ref_dct_matrix_t + 32), _MM_HINT_NTA);
diff --git a/src/mc.cpp b/src/mc.cpp
index 27fad74..9eef17f 100644
--- a/src/mc.cpp
+++ b/src/mc.cpp
@@ -22,8 +22,8 @@
*/
-// SSE2 intrinsic implementation
-// OKA Motofumi - August 23, 2016
+ // SSE2 intrinsic implementation
+ // OKA Motofumi - August 23, 2016
#include
@@ -59,7 +59,7 @@ static __forceinline void storeu(uint8_t* p, const __m128i& x)
}
-static void MC_put_8_c(uint8_t * dest, const uint8_t * ref, int stride, int, int height)
+static void MC_put_8_c(uint8_t* dest, const uint8_t* ref, int stride, int, int height)
{
do {
*reinterpret_cast(dest) = *reinterpret_cast(ref);
@@ -68,7 +68,7 @@ static void MC_put_8_c(uint8_t * dest, const uint8_t * ref, int stride, int, int
}
-static void MC_put_16_sse2(uint8_t * dest, const uint8_t * ref, int stride, int, int height)
+static void MC_put_16_sse2(uint8_t* dest, const uint8_t* ref, int stride, int, int height)
{
do {
storeu(dest, loadu(ref));
@@ -77,7 +77,7 @@ static void MC_put_16_sse2(uint8_t * dest, const uint8_t * ref, int stride, int,
}
-static void MC_avg_8_sse2(uint8_t * dest, const uint8_t * ref, int stride, int, int height)
+static void MC_avg_8_sse2(uint8_t* dest, const uint8_t* ref, int stride, int, int height)
{
do {
storel(dest, avgu8(loadl(ref), loadl(dest)));
@@ -86,7 +86,7 @@ static void MC_avg_8_sse2(uint8_t * dest, const uint8_t * ref, int stride, int,
}
-static void MC_avg_16_sse2(uint8_t * dest, const uint8_t * ref, int stride, int, int height)
+static void MC_avg_16_sse2(uint8_t* dest, const uint8_t* ref, int stride, int, int height)
{
do {
storeu(dest, avgu8(loadu(ref), loadu(dest)));
@@ -95,7 +95,7 @@ static void MC_avg_16_sse2(uint8_t * dest, const uint8_t * ref, int stride, int,
}
-static void MC_put_x8_sse2(uint8_t * dest, const uint8_t * ref, int stride, int, int height)
+static void MC_put_x8_sse2(uint8_t* dest, const uint8_t* ref, int stride, int, int height)
{
do {
storel(dest, avgu8(loadl(ref), loadl(ref + 1)));
@@ -104,7 +104,7 @@ static void MC_put_x8_sse2(uint8_t * dest, const uint8_t * ref, int stride, int,
}
-static void MC_put_y8_sse2(uint8_t * dest, const uint8_t * ref, int stride, int offs, int height)
+static void MC_put_y8_sse2(uint8_t* dest, const uint8_t* ref, int stride, int offs, int height)
{
do {
storel(dest, avgu8(loadl(ref), loadl(ref + offs)));
@@ -113,7 +113,7 @@ static void MC_put_y8_sse2(uint8_t * dest, const uint8_t * ref, int stride, int
}
-static void MC_put_x16_sse2(uint8_t * dest, const uint8_t * ref, int stride, int, int height)
+static void MC_put_x16_sse2(uint8_t* dest, const uint8_t* ref, int stride, int, int height)
{
do {
storeu(dest, avgu8(loadu(ref), loadu(ref + 1)));
@@ -169,7 +169,7 @@ static void MC_avg_y16_sse2(uint8_t* dest, const uint8_t* ref, int stride, int o
static __forceinline __m128i
get_correcter(const __m128i& r0, const __m128i& r1, const __m128i& r2, const __m128i& r3,
- const __m128i& avg0, const __m128i& avg1, const __m128i& one)
+ const __m128i& avg0, const __m128i& avg1, const __m128i& one)
{
__m128i t0 = _mm_or_si128(_mm_xor_si128(r0, r3), _mm_xor_si128(r1, r2));
t0 = _mm_and_si128(t0, _mm_xor_si128(avg0, avg1));
diff --git a/src/mc.h b/src/mc.h
index c392051..182391f 100644
--- a/src/mc.h
+++ b/src/mc.h
@@ -26,7 +26,7 @@
#include
-typedef void (MCFunc) (uint8_t* dest, const uint8_t* ref, int stride, int offs, int height);
+typedef void (MCFunc)(uint8_t* dest, const uint8_t* ref, int stride, int offs, int height);
typedef MCFunc* MCFuncPtr;
// Form prediction (motion compensation) function pointer array (GetPic.c) - Vlad59 04-20-2002
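All motion-compensation kernels share the MCFunc signature above, so they can be swapped through a pointer table. A hedged sketch with a hypothetical plain-C kernel; the real ppppf_motion array is filled with the SSE2 routines from mc.cpp:

```cpp
#include <cstdint>

typedef void (MCFunc)(uint8_t* dest, const uint8_t* ref, int stride, int offs, int height);
typedef MCFunc* MCFuncPtr;

// Hypothetical kernel with the MCFunc signature: 16-byte-wide copy, no interpolation.
static void put_16_c(uint8_t* dest, const uint8_t* ref, int stride, int, int height) {
    do {
        for (int x = 0; x < 16; ++x)
            dest[x] = ref[x];
        ref += stride;
        dest += stride;
    } while (--height);
}

static MCFuncPtr kernel = put_16_c;  // chosen once per block shape / half-pel case
```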
diff --git a/src/misc.cpp b/src/misc.cpp
index 731a111..35976b7 100644
--- a/src/misc.cpp
+++ b/src/misc.cpp
@@ -22,9 +22,9 @@
*/
+#include
#include
#include
-#include
#ifndef _WIN32
#include
@@ -53,9 +53,11 @@ fast_copy(const uint8_t* src, const int src_stride, uint8_t* dst,
{
if (vertical_size == 0) {
return;
- } else if (horizontal_size == src_stride && src_stride == dst_stride) {
+ }
+ else if (horizontal_size == src_stride && src_stride == dst_stride) {
memcpy(dst, src, static_cast(horizontal_size) * vertical_size);
- } else {
+ }
+ else {
do {
memcpy(dst, src, horizontal_size);
dst += dst_stride;
diff --git a/src/store.cpp b/src/store.cpp
index 27a8466..707f8da 100644
--- a/src/store.cpp
+++ b/src/store.cpp
@@ -24,15 +24,15 @@
*/
-#include "MPEG2Decoder.h"
-//#include "postprocess.h"
#include "color_convert.h"
+ //#include "postprocess.h"
#include "misc.h"
+#include "MPEG2Decoder.h"
// Write 2-digits numbers in a 16x16 zone.
static void write_quants(uint8_t* dst, int stride, int mb_width, int mb_height,
- const int* qp)
+ const int* qp)
{
const uint8_t rien[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
const uint8_t nums[10][8] = {
@@ -82,7 +82,7 @@ static void write_quants(uint8_t* dst, int stride, int mb_width, int mb_height,
dstp += 5;
int d = (number / 10) % 10;
num = nums[d]; // 0x0
- if (c==0 && d==0) num = rien;
+ if (c == 0 && d == 0) num = rien;
write(num, dstp, stride);
dstp += 5;
@@ -95,7 +95,7 @@ static void write_quants(uint8_t* dst, int stride, int mb_width, int mb_height,
static void set_qparams(const int* qp, size_t mb_size, int& minquant,
- int& maxquant, int& avgquant)
+ int& maxquant, int& avgquant)
{
int minq = qp[0], maxq = qp[0], sum = qp[0];
for (size_t i = 1; i < mb_size; ++i) {
@@ -133,20 +133,20 @@ void CMPEG2Decoder::assembleFrame(uint8_t* src[], int pf, YV12PICT& dst)
if (iPP == 1 || (iPP == -1 && pf == 0)) iPPt = true;
else iPPt = false;
postprocess(src, this->Coded_Picture_Width, this->Chroma_Width,
- ppptr, dst->ypitch, dst->uvpitch, this->Coded_Picture_Width,
- this->Coded_Picture_Height, this->QP, this->mb_width, pp_mode, moderate_h, moderate_v,
- chroma_format == 1 ? false : true, iPPt);
+ ppptr, dst->ypitch, dst->uvpitch, this->Coded_Picture_Width,
+ this->Coded_Picture_Height, this->QP, this->mb_width, pp_mode, moderate_h, moderate_v,
+ chroma_format == 1 ? false : true, iPPt);
if (upConv > 0 && chroma_format == 1)
{
if (iCC == 1 || (iCC == -1 && pf == 0))
{
- conv420to422I(ppptr[1],dst->u,dst->uvpitch,dst->uvpitch,Coded_Picture_Width,Coded_Picture_Height);
- conv420to422I(ppptr[2],dst->v,dst->uvpitch,dst->uvpitch,Coded_Picture_Width,Coded_Picture_Height);
+ conv420to422I(ppptr[1], dst->u, dst->uvpitch, dst->uvpitch, Coded_Picture_Width, Coded_Picture_Height);
+ conv420to422I(ppptr[2], dst->v, dst->uvpitch, dst->uvpitch, Coded_Picture_Width, Coded_Picture_Height);
}
else
{
- conv420to422P(ppptr[1],dst->u,dst->uvpitch,dst->uvpitch,Coded_Picture_Width,Coded_Picture_Height);
- conv420to422P(ppptr[2],dst->v,dst->uvpitch,dst->uvpitch,Coded_Picture_Width,Coded_Picture_Height);
+ conv420to422P(ppptr[1], dst->u, dst->uvpitch, dst->uvpitch, Coded_Picture_Width, Coded_Picture_Height);
+ conv420to422P(ppptr[2], dst->v, dst->uvpitch, dst->uvpitch, Coded_Picture_Width, Coded_Picture_Height);
}
}
}
@@ -158,11 +158,13 @@ void CMPEG2Decoder::assembleFrame(uint8_t* src[], int pf, YV12PICT& dst)
if (iCC == 1 || (iCC == -1 && pf == 0)) {
conv420to422I(src[1], dst.u, Chroma_Width, dst.uvpitch, Coded_Picture_Width, Coded_Picture_Height);
conv420to422I(src[2], dst.v, Chroma_Width, dst.uvpitch, Coded_Picture_Width, Coded_Picture_Height);
- } else {
+ }
+ else {
conv420to422P(src[1], dst.u, Chroma_Width, dst.uvpitch, Coded_Picture_Width, Coded_Picture_Height);
conv420to422P(src[2], dst.v, Chroma_Width, dst.uvpitch, Coded_Picture_Width, Coded_Picture_Height);
}
- } else {
+ }
+ else {
fast_copy(src[1], Chroma_Width, dst.u, dst.uvpitch, Chroma_Width, Chroma_Height);
fast_copy(src[2], Chroma_Width, dst.v, dst.uvpitch, Chroma_Width, Chroma_Height);
}
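assembleFrame above picks the interlaced or progressive 4:2:0 to 4:2:2 chroma upsampler per frame. Reading the condition in the hunk, iCC == 1 forces interlaced, iCC == 0 forces progressive, and iCC == -1 follows the per-picture flag pf (interlaced when pf == 0, as read from the code). A hedged one-liner restating that choice (the helper name is illustrative):

```cpp
// Mirrors the condition used above when upConv > 0 and chroma_format == 1 (4:2:0).
static bool use_interlaced_chroma_upsampling(int iCC, int pf) {
    return iCC == 1 || (iCC == -1 && pf == 0);
}
```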
diff --git a/src/yv12pict.cpp b/src/yv12pict.cpp
index 12fdd86..daec044 100644
--- a/src/yv12pict.cpp
+++ b/src/yv12pict.cpp
@@ -19,10 +19,10 @@
*
*/
-// replace with one that doesn't need fixed size table - trbarry 3-22-2002
+ // replace with one that doesn't need fixed size table - trbarry 3-22-2002
-#include
#include
+#include
#include "yv12pict.h"
#ifndef _WIN32
@@ -86,7 +86,7 @@ YV12PICT::YV12PICT(PVideoFrame& frame) :
{}
-YV12PICT::YV12PICT(uint8_t* py, uint8_t* pu, uint8_t*pv, int yw, int cw, int h) :
+YV12PICT::YV12PICT(uint8_t* py, uint8_t* pu, uint8_t* pv, int yw, int cw, int h) :
allocated(false),
y(py), u(pu), v(pv),
ypitch((yw + 31) & ~31), uvpitch((cw + 15) & ~15),
diff --git a/src/yv12pict.h b/src/yv12pict.h
index 16e8f95..67ee145 100644
--- a/src/yv12pict.h
+++ b/src/yv12pict.h
@@ -2,20 +2,21 @@
#define YV12PICT_H
#include
-#include <avisynth.h>
+
+#include "avisynth.h"
class YV12PICT {
const bool allocated;
public:
- uint8_t *y, *u, *v;
+ uint8_t* y, * u, * v;
int ypitch, uvpitch;
int ywidth, uvwidth;
int yheight, uvheight;
int pf;
YV12PICT(PVideoFrame& frame);
- YV12PICT(uint8_t* py, uint8_t* pu, uint8_t*pv, int yw, int cw, int h);
+ YV12PICT(uint8_t* py, uint8_t* pu, uint8_t* pv, int yw, int cw, int h);
YV12PICT(int height, int width, int chroma_format);
~YV12PICT();
};
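YV12PICT above rounds the luma pitch up to a multiple of 32 and the chroma pitch up to a multiple of 16, via (yw + 31) & ~31 and (cw + 15) & ~15. The same rounding written as a hedged generic helper:

```cpp
// Round value up to the next multiple of alignment (alignment must be a power of two).
static int align_up(int value, int alignment) {
    return (value + alignment - 1) & ~(alignment - 1);
}
// e.g. align_up(720, 32) == 736, align_up(360, 16) == 368
```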