diff --git a/example/autoEncoder/autoencoder.jpg b/example/autoEncoder/autoencoder.jpg
new file mode 100644
index 0000000..a814ce5
Binary files /dev/null and b/example/autoEncoder/autoencoder.jpg differ
diff --git a/example/autoEncoder/cpp_example.cpp b/example/autoEncoder/cpp_example.cpp
new file mode 100644
index 0000000..3943203
--- /dev/null
+++ b/example/autoEncoder/cpp_example.cpp
@@ -0,0 +1,150 @@
+
+#include <string>
+#include <iostream>
+#include <vector>
+#include <map>
+#include <cstdlib>
+#include <ctime>
+#include <filesystem>
+
+#include "../cpp/snNet.h"
+#include "../cpp/snTensor.h"
+#include "../cpp/snOperator.h"
+
+#include "Lib/OpenCV_3.3.0/opencv2/core/core_c.h"
+#include "Lib/OpenCV_3.3.0/opencv2/core/core.hpp"
+#include "Lib/OpenCV_3.3.0/opencv2/imgproc/imgproc_c.h"
+#include "Lib/OpenCV_3.3.0/opencv2/imgproc/imgproc.hpp"
+#include "Lib/OpenCV_3.3.0/opencv2/highgui/highgui_c.h"
+#include "Lib/OpenCV_3.3.0/opencv2/highgui/highgui.hpp"
+
+using namespace std;
+namespace sn = SN_API;
+
+bool loadImage(string& imgPath, int classCnt, vector<vector<string>>& imgName, vector<int>& imgCntDir, map<string, cv::Mat>& images){
+
+    for (int i = 0; i < classCnt; ++i){
+
+        namespace fs = std::tr2::sys;
+
+        if (!fs::exists(fs::path(imgPath + to_string(i) + "/"))) continue;
+
+        fs::directory_iterator it(imgPath + to_string(i) + "/");
+        while (it != fs::directory_iterator()){
+
+            fs::path p = it->path();
+            if (fs::is_regular_file(p) && (p.extension() == ".png"))
+                imgName[i].push_back(p.filename());
+
+            ++it;
+        }
+
+        // count only the png files actually collected, so indexing imgName[i] stays valid
+        imgCntDir[i] = int(imgName[i].size());
+    }
+
+    return true;
+}
+
+int main(int argc, char* argv[]){
+
+    sn::Net snet;
+
+    // encoder 784 -> 32, decoder 32 -> 784, sigmoid output for pixel probabilities
+    snet.addNode("Input", sn::Input(), "FC1")
+        .addNode("FC1", sn::FullyConnected(256, sn::active::relu), "FC2")
+        .addNode("FC2", sn::FullyConnected(128, sn::active::relu), "FC3")
+        .addNode("FC3", sn::FullyConnected(32, sn::active::relu), "FC4")
+        .addNode("FC4", sn::FullyConnected(128, sn::active::relu), "FC5")
+        .addNode("FC5", sn::FullyConnected(256, sn::active::relu), "FC6")
+        .addNode("FC6", sn::FullyConnected(784, sn::active::sigmoid), "LS")
+        .addNode("LS", sn::LossFunction(sn::lossType::binaryCrossEntropy), "Output");
+
+    string imgPath = "c://cpp//skyNet//example//autoEncoder//images//";
+
+    int classCnt = 5, batchSz = 100, w = 28, h = 28;
+    float lr = 0.001F;
+
+    vector<vector<string>> imgName(classCnt);
+    vector<int> imgCntDir(classCnt);
+    map<string, cv::Mat> images;
+
+    if (!loadImage(imgPath, classCnt, imgName, imgCntDir, images)){
+        cout << "Error 'loadImage' path: " << imgPath << endl;
+        system("pause");
+        return -1;
+    }
+
+    //snet.loadAllWeightFromFile("c:\\cpp\\w.dat");
+
+    sn::Tensor inLayer(sn::snLSize(w, h, 1, batchSz));
+    sn::Tensor outLayer(sn::snLSize(w * h, 1, 1, batchSz));
+
+    float accuratSumm = 0;
+    for (int k = 0; k < 1000; ++k){
+
+        srand(clock());
+
+        for (int i = 0; i < batchSz; ++i){
+
+            // directory
+            int ndir = rand() % classCnt;
+            while (imgCntDir[ndir] == 0)
+                ndir = rand() % classCnt;
+
+            // image
+            int nimg = rand() % imgCntDir[ndir];
+
+            // read, with a cache so each file is decoded only once
+            cv::Mat img;
+            string nm = imgName[ndir][nimg];
+            if (images.find(nm) != images.end())
+                img = images[nm];
+            else{
+                img = cv::imread(imgPath + to_string(ndir) + "/" + nm, CV_LOAD_IMAGE_UNCHANGED);
+                images[nm] = img;
+            }
+
+            float* refData = inLayer.data() + i * w * h;
+
+            // normalize pixels to [0,1] to match the sigmoid output range
+            size_t nr = img.rows, nc = img.cols;
+            for (size_t r = 0; r < nr; ++r){
+                uchar* pt = img.ptr(r);
+                for (size_t c = 0; c < nc; ++c)
+                    refData[r * nc + c] = pt[c] / 255.F;
+            }
+        }
+
+        // training: the input batch is also the target
+        float accurat = 0;
+        snet.training(lr,
+            inLayer,
+            outLayer,
+            inLayer,
+            accurat);
+
+        // show the first reconstructed image of the batch
+        float* refData = outLayer.data();
+
+        cv::Mat img(w, h, CV_8U);
+        for (size_t r = 0; r < h; ++r){
+            uchar* pt = img.ptr(r);
+            for (size_t c = 0; c < w; ++c)
+                pt[c] = uchar(refData[r * w + c] * 255.F);
+        }
+
+        cv::namedWindow("1", 0);
+        cv::imshow("1", img);
+        cv::waitKey(1);
+
+        accuratSumm += accurat;
+
+        cout << k << " accurate " << accuratSumm / (k + 1) << " " << snet.getLastErrorStr() << endl;
+    }
+
+    snet.saveAllWeightToFile("c:\\cpp\\w.dat");
+
+    system("pause");
+    return 0;
+}
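A side note on loadImage above: std::tr2::sys is a VS2013-era draft of the filesystem TS that newer toolchains no longer ship. The same per-class directory scan can be written against C++17 std::filesystem; a minimal self-contained sketch (listPng is a hypothetical helper, not part of the patch):

    #include <filesystem>
    #include <string>
    #include <vector>

    namespace fs = std::filesystem;

    // Collect the file names of all .png images in one class directory.
    std::vector<std::string> listPng(const std::string& dirPath){
        std::vector<std::string> names;
        if (!fs::exists(dirPath)) return names;
        for (const auto& e : fs::directory_iterator(dirPath)){
            if (e.is_regular_file() && e.path().extension() == ".png")
                names.push_back(e.path().filename().string());
        }
        return names;
    }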
diff --git a/example/autoEncoder/images.rar b/example/autoEncoder/images.rar
new file mode 100644
index 0000000..660e02e
Binary files /dev/null and b/example/autoEncoder/images.rar differ
diff --git a/example/autoEncoder/python_example.py b/example/autoEncoder/python_example.py
new file mode 100644
index 0000000..2bbbb9d
--- /dev/null
+++ b/example/autoEncoder/python_example.py
@@ -0,0 +1,67 @@
+
+import os
+
+from libskynet import *
+import numpy as np
+import imageio
+import random
+import ctypes
+import datetime
+
+
+# create net
+net = snNet.Net()
+net.addNode('In', snOperator.Input(), 'FC1') \
+   .addNode('FC1', snOperator.FullyConnected(256), 'FC2') \
+   .addNode('FC2', snOperator.FullyConnected(128), 'FC3') \
+   .addNode('FC3', snOperator.FullyConnected(32), 'FC4') \
+   .addNode('FC4', snOperator.FullyConnected(128), 'FC5') \
+   .addNode('FC5', snOperator.FullyConnected(256), 'FC6') \
+   .addNode('FC6', snOperator.FullyConnected(784), 'LS') \
+   .addNode('LS', snOperator.LossFunction(snType.lossType.binaryCrossEntropy), 'Output')
+
+# load weights, if a checkpoint exists
+#if net.loadAllWeightFromFile('c:/cpp/w.dat'):
+#    print('weights loaded')
+#else:
+#    print('error loading weights')
+
+# load image lists
+imgList = []
+pathImg = 'c:\\cpp\\skyNet\\example\\autoEncoder\\images\\'
+for i in range(10):
+    imgList.append(os.listdir(pathImg + str(i)))
+
+bsz = 100
+lr = 0.001
+accuratSumm = 0.
+inLayer = np.zeros((bsz, 1, 28, 28), ctypes.c_float)
+outLayer = np.zeros((bsz, 1, 1, 28 * 28), ctypes.c_float)
+imgMem = {}
+
+# training loop
+for n in range(1000):
+
+    for i in range(bsz):
+        ndir = random.randint(0, 10 - 1)
+        nimg = random.randint(0, len(imgList[ndir]) - 1)
+
+        nm = pathImg + str(ndir) + '/' + imgList[ndir][nimg]
+        if nm in imgMem:
+            inLayer[i][0] = imgMem[nm]
+        else:
+            # normalize to [0,1] to match the sigmoid/BCE output
+            inLayer[i][0] = imageio.imread(nm) / 255.
+            imgMem[nm] = inLayer[i][0].copy()
+
+    acc = [0]
+    net.training(lr, inLayer, outLayer, inLayer, acc)
+
+    accuratSumm += acc[0] / bsz
+
+    print(datetime.datetime.now().strftime('%H:%M:%S'), n, 'accurate', accuratSumm / (n + 1))
+
+# save weights
+if net.saveAllWeightToFile('c:/cpp/w.dat'):
+    print('weights saved')
+else:
+    print('error saving weights')
\ No newline at end of file
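Both examples pass the input batch as its own training target; that is what makes the 784-32-784 net an autoencoder, with binaryCrossEntropy evaluated pixel-wise between the sigmoid reconstruction and the original image. A standalone sketch of that loss for one flattened batch (an illustration of the formula, not SkyNet's internal implementation):

    #include <cmath>
    #include <cstddef>

    // Pixel-wise binary cross-entropy between sigmoid outputs and targets
    // in [0,1], averaged over n values; eps guards log(0).
    float binaryCrossEntropy(const float* out, const float* target, std::size_t n){
        const float eps = 1e-7F;
        float loss = 0.F;
        for (std::size_t i = 0; i < n; ++i){
            float p = out[i];
            if (p < eps)       p = eps;
            if (p > 1.F - eps) p = 1.F - eps;
            loss -= target[i] * std::log(p) + (1.F - target[i]) * std::log(1.F - p);
        }
        return loss / n;
    }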
diff --git a/mswin/VS12/skyNet.sln b/mswin/VS12/skyNet.sln
index 490c007..d73663a 100644
--- a/mswin/VS12/skyNet.sln
+++ b/mswin/VS12/skyNet.sln
@@ -1,7 +1,7 @@
 Microsoft Visual Studio Solution File, Format Version 12.00
 # Visual Studio 2013
-VisualStudioVersion = 12.0.31101.0
+VisualStudioVersion = 12.0.40629.0
 MinimumVisualStudioVersion = 10.0.40219.1
 Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "snEngine", "SNEngine.vcxproj", "{87E622F8-2436-40B6-A5A6-3F5DDE14BAC5}"
 	ProjectSection(ProjectDependencies) = postProject
@@ -28,6 +28,7 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "snAux", "SNAux.vcxproj", "{
 EndProject
 Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "skynet", "SkyNet.vcxproj", "{507A9B3C-E5E6-4CEC-A99D-729D2171DF81}"
 	ProjectSection(ProjectDependencies) = postProject
+		{2C7FA033-361B-4429-97E9-64F2A0A20CBE} = {2C7FA033-361B-4429-97E9-64F2A0A20CBE}
 		{C993F645-29F0-4079-ACBA-033BFED8B8F6} = {C993F645-29F0-4079-ACBA-033BFED8B8F6}
 		{F3E981BD-4BA4-4538-9D21-6ACD252F4895} = {F3E981BD-4BA4-4538-9D21-6ACD252F4895}
 		{87E622F8-2436-40B6-A5A6-3F5DDE14BAC5} = {87E622F8-2436-40B6-A5A6-3F5DDE14BAC5}
@@ -39,6 +40,9 @@ EndProject
 Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "snSIMD", "snSIMD.vcxproj", "{30FAF753-DBAE-4701-B5F4-29C7FBCBF9F6}"
 EndProject
 Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "snOperatorCPU", "snOperatorCPU.vcxproj", "{F3E981BD-4BA4-4538-9D21-6ACD252F4895}"
+	ProjectSection(ProjectDependencies) = postProject
+		{30FAF753-DBAE-4701-B5F4-29C7FBCBF9F6} = {30FAF753-DBAE-4701-B5F4-29C7FBCBF9F6}
+	EndProjectSection
 EndProject
 Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "snOperatorCUDA", "snOperatorCUDA.vcxproj", "{2C7FA033-361B-4429-97E9-64F2A0A20CBE}"
 EndProject
diff --git a/mswin/VS12/skyNet.vcxproj b/mswin/VS12/skyNet.vcxproj
index 2f0ec4c..deb2624 100644
--- a/mswin/VS12/skyNet.vcxproj
+++ b/mswin/VS12/skyNet.vcxproj
@@ -76,7 +76,7 @@
       Windows
       true
       $(OutDir)
-      snOperatorCPU.lib;snAux.lib;snEngine.lib;%(AdditionalDependencies)
+      snOperatorCUDA.lib;snAux.lib;snEngine.lib;%(AdditionalDependencies)
@@ -97,7 +97,7 @@
       true
       true
      $(OutDir)
-      snOperatorCPU.lib;snAux.lib;snEngine.lib;%(AdditionalDependencies)
+      snOperatorCUDA.lib;snAux.lib;snEngine.lib;%(AdditionalDependencies)
diff --git a/mswin/VS12/snOperatorCUDA.vcxproj b/mswin/VS12/snOperatorCUDA.vcxproj
index 75592d0..7fb5205 100644
--- a/mswin/VS12/snOperatorCUDA.vcxproj
+++ b/mswin/VS12/snOperatorCUDA.vcxproj
@@ -64,7 +64,8 @@
       true
       true
-      /arch:AVX
+
+
     Windows
@@ -132,7 +133,8 @@
     true
-      /arch:AVX
+
+
     Windows
diff --git a/src/skynet/src/snet.cpp b/src/skynet/src/snet.cpp
index dd633b3..f64d3bb 100644
--- a/src/skynet/src/snet.cpp
+++ b/src/skynet/src/snet.cpp
@@ -195,19 +195,14 @@ bool SNet::createNet(Net& inout_net, std::string& out_err){
 SNet::SNet(const char* jnNet, char* out_err /*sz 256*/, SN_API::snStatusCBack sts, SN_API::snUData ud) :
     stsCBack_(sts), udata_(ud){
 
-    string err; SN_Base::Net net;
-    if (!jnParseNet(jnNet, net, err)){
+    string err;
+    SN_Base::Net net;
+    if (!jnParseNet(jnNet, net, err) || !createNet(net, err)){
         statusMess(err);
         strcpy(out_err, err.c_str());
         return;
     }
 
-    if (!createNet(net, err)){
-        statusMess(err);
-        strcpy(out_err, err.c_str());
-        return;
-    }
-
     nodes_ = net.nodes;
     operats_ = net.operats;
@@ -352,7 +347,7 @@ bool SNet::getWeightNode(const char* nodeName, SN_Base::snSize& wsz, SN_Base::snFloat** wData){
         return false;
     }
 
-    auto weight = operats_[nodeName]->getWeight();
+    const Tensor& weight = operats_[nodeName]->getWeight();
 
     wsz = weight.size();
@@ -496,8 +491,10 @@ bool SNet::saveAllWeightToFile(const char* filePath){
     snSize lSize;
     for (auto opr : operats_){
 
-        data = opr.second->getWeight().getDataCPU();
-        lSize = opr.second->getWeight().size();
+        const Tensor& wt = opr.second->getWeight();
+
+        data = wt.getDataCPU();
+        lSize = wt.size();
 
         if (data){
             ofs << opr.first << " w " << lSize.w << " " << lSize.h << " " << lSize.d << endl;
diff --git a/src/snBase/snBase.h b/src/snBase/snBase.h
index 600b8b4..6a81895 100644
--- a/src/snBase/snBase.h
+++ b/src/snBase/snBase.h
@@ -187,23 +187,23 @@ namespace SN_Base{
             return true;
         }
 
-        virtual std::map<std::string, std::string> getInternPrm() const final{
+        virtual std::map<std::string, std::string> getInternPrm() const{
             return basePrms_;
         }
-
-        virtual const SN_Base::Tensor& getWeight() const final{
-            return baseWeight_;
-        }
-
+
         virtual batchNorm getBatchNorm() const{
             return baseBatchNorm_;
         }
 
-        virtual const SN_Base::Tensor& getOutput() const final{
+        virtual const SN_Base::Tensor& getWeight() const{
+            return baseWeight_;
+        }
+
+        virtual const SN_Base::Tensor& getOutput() const{
             return baseOut_;
         }
 
-        virtual const SN_Base::Tensor& getGradient() const final{
+        virtual const SN_Base::Tensor& getGradient() const{
             return baseGrad_;
         }
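The snBase.h hunk removes final from getInternPrm/getWeight/getOutput/getGradient, so a derived operator (for instance a CUDA-backed one) may now override these accessors. A self-contained toy illustration of what the change permits (not code from this patch):

    #include <iostream>

    struct Base{
        // Was declared 'final' before the patch: no subclass could override it.
        virtual const float& value() const { return v_; }
        virtual ~Base() = default;
    protected:
        mutable float v_ = 0.5F;   // mutable so a const accessor may refresh it
    };

    struct DeviceBacked : Base{
        // Legal only now that value() is not final: refresh the host copy first.
        const float& value() const override{
            v_ = 0.75F;            // stand-in for copying fresh data from a device
            return v_;
        }
    };

    int main(){
        DeviceBacked d;
        const Base& b = d;
        std::cout << b.value() << "\n";   // prints 0.75 via virtual dispatch
    }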
other == *this"); auto od = other.getDataCPU(); @@ -47,7 +47,7 @@ Tensor& Tensor::operator+=(const Tensor& other){ Tensor& Tensor::operator-=(const Tensor& other){ - ASSERT_MESS(other == *this, ""); + ASSERT_MESS(other == *this, "Tensor::operator-=: other == *this"); auto od = other.getDataCPU(); @@ -62,7 +62,7 @@ Tensor& Tensor::operator-=(const Tensor& other){ void Tensor::setDataCPU(const snFloat* data, const snSize& nsz){ size_t nnsz = nsz.size(); - ASSERT_MESS(data && (nnsz > 0), ""); + ASSERT_MESS(data && (nnsz > 0), "Tensor::setDataCPU: data && (nnsz > 0)"); if (sz_.size() < nnsz) dataCPU_ = (snFloat*)realloc(dataCPU_, nnsz * sizeof(snFloat)); @@ -79,7 +79,7 @@ snFloat* Tensor::getDataCPU() const{ void Tensor::resize(const snSize& nsz){ size_t nnsz = nsz.size(), csz = sz_.size(); - ASSERT_MESS(nnsz > 0, ""); + ASSERT_MESS(nnsz > 0, "Tensor::resize: nnsz > 0"); if (csz < nnsz){ dataCPU_ = (snFloat*)realloc(dataCPU_, nnsz * sizeof(snFloat)); diff --git a/src/snOperatorCPU/src/Operator/fullyConnected.cpp b/src/snOperatorCPU/src/Operator/fullyConnected.cpp index 4ba7342..36f29f1 100644 --- a/src/snOperatorCPU/src/Operator/fullyConnected.cpp +++ b/src/snOperatorCPU/src/Operator/fullyConnected.cpp @@ -100,7 +100,7 @@ void FullyConnected::load(std::map& prms){ bool FullyConnected::setInternPrm(std::map& prms){ basePrms_ = prms; - + if (prms.find("active") != prms.end()){ string atype = prms["active"]; @@ -222,7 +222,8 @@ void FullyConnected::forward(const SN_Base::Tensor& inTns, const operationParam& } /// calculation of the output values of neurons - snFloat* out = baseOut_.getDataCPU(), *weight = baseWeight_.getDataCPU(); + snFloat* out = baseOut_.getDataCPU(), + * weight = baseWeight_.getDataCPU(); // +bias? if (!useBias_){ @@ -265,10 +266,12 @@ void FullyConnected::backward(const SN_Base::Tensor& inTns, const operationParam snFloat* out = baseOut_.getDataCPU(); size_t osz = kernel_ * inSzMem_.n; + activationBackward(osz, out, activeType_); // update grad - for (size_t i = 0; i < osz; ++i) gradIn[i] *= out[i]; + for (size_t i = 0; i < osz; ++i) + gradIn[i] *= out[i]; } /// batchNorm @@ -310,7 +313,8 @@ void FullyConnected::backward(const SN_Base::Tensor& inTns, const operationParam void FullyConnected::updateConfig(bool isLern, const snSize& newsz){ - size_t stp = newsz.w * newsz.h * newsz.d, ntp = (stp + 1) * kernel_; + size_t stp = newsz.w * newsz.h * newsz.d, + ntp = (stp + 1) * kernel_; // leave the existing weights as they are, initialize the remainder size_t wcsz = baseWeight_.size().size(); @@ -332,7 +336,7 @@ void FullyConnected::updateConfig(bool isLern, const snSize& newsz){ auxParams_["dWGrad"].resize(ntp, 0); if (batchNormType_ != batchNormType::none){ - auxParams_["bn_norm"].resize(newsz.n * kernel_, 0); baseBatchNorm_.norm = auxParams_["bn_norm"].data(); + auxParams_["bn_norm"].resize(newsz.n * kernel_, 0); baseBatchNorm_.norm = auxParams_["bn_norm"].data(); } } } \ No newline at end of file diff --git a/src/snOperatorCPU/src/Operator/fullyConnected.h b/src/snOperatorCPU/src/Operator/fullyConnected.h index 0f53f7b..cb85488 100644 --- a/src/snOperatorCPU/src/Operator/fullyConnected.h +++ b/src/snOperatorCPU/src/Operator/fullyConnected.h @@ -42,6 +42,7 @@ class FullyConnected final : SN_Base::OperatorBase{ bool setBatchNorm(const SN_Base::batchNorm& bn) override; + private: size_t kernel_ = 10; ///< number of hidden neurons diff --git a/src/snOperatorCPU/src/snOperator.cpp b/src/snOperatorCPU/src/snOperator.cpp index 9de15b3..2bbc611 100644 --- 
diff --git a/src/snOperatorCPU/src/Operator/fullyConnected.h b/src/snOperatorCPU/src/Operator/fullyConnected.h
index 0f53f7b..cb85488 100644
--- a/src/snOperatorCPU/src/Operator/fullyConnected.h
+++ b/src/snOperatorCPU/src/Operator/fullyConnected.h
@@ -42,6 +42,7 @@ class FullyConnected final : SN_Base::OperatorBase{
 
     bool setBatchNorm(const SN_Base::batchNorm& bn) override;
 
+
 private:
 
     size_t kernel_ = 10;    ///< number of hidden neurons
diff --git a/src/snOperatorCPU/src/snOperator.cpp b/src/snOperatorCPU/src/snOperator.cpp
index 9de15b3..2bbc611 100644
--- a/src/snOperatorCPU/src/snOperator.cpp
+++ b/src/snOperatorCPU/src/snOperator.cpp
@@ -64,7 +64,7 @@ namespace SN_Opr{
         else if (fname == "Resize")     ret = (SN_Base::OperatorBase*)new Resize(net, fname, node, prms);
         else if (fname == "BatchNorm")  ret = (SN_Base::OperatorBase*)new BatchNorm(net, fname, node, prms);
         else if (fname == "Activation") ret = (SN_Base::OperatorBase*)new Activation(net, fname, node, prms);
-
+
         return ret;
     }
diff --git a/src/snOperatorCUDA/src/Operator/convolution.cpp b/src/snOperatorCUDA/src/Operator/convolution.cpp
index d34afb0..fea2b81 100644
--- a/src/snOperatorCUDA/src/Operator/convolution.cpp
+++ b/src/snOperatorCUDA/src/Operator/convolution.cpp
@@ -275,9 +275,7 @@ void Convolution::forward(const SN_Base::Tensor& inTns, const operationParam& operPrm){
 
     // batchNorm
     if (batchNormType_ == batchNormType::beforeActive)
         batchNormForward(operPrm.isLerning, outsz, out, out, baseBatchNorm_);
-
-    // auto bn = getBatchNorm();
-
+
     /// active function
     if (activeType_ != activeType::none)
         activationForward(outsz, out, activeType_);
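For context, the snOperator.cpp hunk above sits in the backend's operator factory: an if/else dispatch from a node-type name to a constructor call. The same dispatch is often shaped as a name-to-constructor registry; a minimal self-contained sketch with placeholder types (these are not SkyNet's classes or signatures):

    #include <functional>
    #include <map>
    #include <memory>
    #include <string>

    // Placeholder operator interface standing in for SN_Base::OperatorBase.
    struct Op{ virtual ~Op() = default; };
    struct Resize    : Op{};
    struct BatchNorm : Op{};

    // Name -> constructor registry: the same dispatch as the if/else chain,
    // extensible without editing the factory body.
    std::unique_ptr<Op> createOperator(const std::string& fname){
        static const std::map<std::string, std::function<std::unique_ptr<Op>()>> reg = {
            { "Resize",    []{ return std::make_unique<Resize>(); } },
            { "BatchNorm", []{ return std::make_unique<BatchNorm>(); } },
        };
        auto it = reg.find(fname);
        return it != reg.end() ? it->second() : nullptr;  // unknown name -> nullptr
    }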