Fix git and openvino_tokenizer path
Wovchena committed May 9, 2024
1 parent fbc55e1 commit 9f61788
Showing 9 changed files with 20 additions and 20 deletions.
8 changes: 4 additions & 4 deletions .github/workflows/lcm_dreamshaper_cpp.yml
@@ -39,7 +39,7 @@ jobs:
         run: |
           conda activate openvino_lcm_cpp
           conda update -c conda-forge --all
-          conda install -c conda-forge openvino=2024.1.0 c-compiler cxx-compiler make cmake
+          conda install -c conda-forge openvino=2024.1.0 c-compiler cxx-compiler git make cmake
           conda env config vars set LD_LIBRARY_PATH=$CONDA_PREFIX/lib:$LD_LIBRARY_PATH
       - name: Install python dependencies
@@ -53,7 +53,7 @@ jobs:
         working-directory: ${{ env.working_directory }}
         run: |
           conda activate openvino_lcm_cpp
-          optimum-cli export openvino --model SimianLuo/LCM_Dreamshaper_v7 --convert-tokenizer --weight-format fp16 models/lcm_dreamshaper_v7/FP16
+          optimum-cli export openvino --model SimianLuo/LCM_Dreamshaper_v7 --weight-format fp16 models/lcm_dreamshaper_v7/FP16
       - name: Build app
         working-directory: ${{ env.working_directory }}
@@ -84,7 +84,7 @@ jobs:
         run: |
           conda activate openvino_lcm_cpp
           conda update -c conda-forge --all
-          conda install -c conda-forge openvino=2024.1.0 c-compiler cxx-compiler make cmake
+          conda install -c conda-forge openvino=2024.1.0 c-compiler cxx-compiler git make cmake
           conda env config vars set LD_LIBRARY_PATH=$CONDA_PREFIX/lib:$LD_LIBRARY_PATH
       - name: Install python dependencies
@@ -98,7 +98,7 @@ jobs:
         working-directory: ${{ env.working_directory }}
         run: |
           conda activate openvino_lcm_cpp
-          optimum-cli export openvino --model SimianLuo/LCM_Dreamshaper_v7 --convert-tokenizer --weight-format fp16 models/lcm_dreamshaper_v7/FP16
+          optimum-cli export openvino --model SimianLuo/LCM_Dreamshaper_v7 --weight-format fp16 models/lcm_dreamshaper_v7/FP16
       - name: Build app
         working-directory: ${{ env.working_directory }}
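Across both jobs this file gains a `git` package in the conda install and drops `--convert-tokenizer` from the optimum-cli export. The diff itself does not say why git is needed; a plausible reading is that the later "Install python dependencies" step builds openvino_tokenizers from the repository's thirdparty checkout, for which pip wants a git executable inside the environment. A minimal sketch under that assumption (the install path is taken from the stable_diffusion_1_5 README further down in this commit):

```shell
# Assumption: openvino_tokenizers is installed from the repo's thirdparty
# directory (path as shown in the stable_diffusion_1_5 README below); source
# builds like this are the likely reason "git" is now installed into the env.
conda activate openvino_lcm_cpp
python -m pip install ../../../thirdparty/openvino_tokenizers/[transformers]
```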
8 changes: 4 additions & 4 deletions .github/workflows/stable_diffusion_1_5_cpp.yml
@@ -38,7 +38,7 @@ jobs:
       - name: Install OpenVINO and other conda dependencies
         run: |
           conda activate openvino_sd_cpp
-          conda install -c conda-forge openvino=2024.1.0 c-compiler cxx-compiler make cmake
+          conda install -c conda-forge openvino=2024.1.0 c-compiler cxx-compiler git make cmake
           conda env config vars set LD_LIBRARY_PATH=$CONDA_PREFIX/lib:$LD_LIBRARY_PATH
       - name: Install python dependencies
@@ -52,7 +52,7 @@ jobs:
         working-directory: ${{ env.working_directory }}
         run: |
           conda activate openvino_sd_cpp
-          optimum-cli export openvino --model runwayml/stable-diffusion-v1-5 --task stable-diffusion --convert-tokenizer --weight-format fp16 models/stable_diffusion_v1_5_ov/FP16
+          optimum-cli export openvino --model runwayml/stable-diffusion-v1-5 --task stable-diffusion --weight-format fp16 models/stable_diffusion_v1_5_ov/FP16
       - name: Build app
         working-directory: ${{ env.working_directory }}
@@ -82,7 +82,7 @@ jobs:
       - name: Install OpenVINO and other conda dependencies
         run: |
           conda activate openvino_sd_cpp
-          conda install -c conda-forge openvino=2024.1.0 c-compiler cxx-compiler make cmake
+          conda install -c conda-forge openvino=2024.1.0 c-compiler cxx-compiler git make cmake
       - name: Install python dependencies
         working-directory: ${{ env.working_directory }}
@@ -95,7 +95,7 @@ jobs:
         working-directory: ${{ env.working_directory }}
         run: |
           conda activate openvino_sd_cpp
-          optimum-cli export openvino --model runwayml/stable-diffusion-v1-5 --task stable-diffusion --convert-tokenizer --weight-format fp16 models/stable_diffusion_v1_5_ov/FP16
+          optimum-cli export openvino --model runwayml/stable-diffusion-v1-5 --task stable-diffusion --weight-format fp16 models/stable_diffusion_v1_5_ov/FP16
       - name: Build app
         working-directory: ${{ env.working_directory }}
4 changes: 2 additions & 2 deletions image_generation/lcm_dreamshaper_v7/cpp/README.md
@@ -18,7 +18,7 @@ Prepare a python environment and install dependencies:
 conda create -n openvino_lcm_cpp python==3.10
 conda activate openvino_lcm_cpp
 conda update -c conda-forge --all
-conda install -c conda-forge openvino=2024.1.0 c-compiler cxx-compiler make cmake
+conda install -c conda-forge openvino=2024.1.0 c-compiler cxx-compiler git make cmake
 # Ensure that Conda standard libraries are used
 conda env config vars set LD_LIBRARY_PATH=$CONDA_PREFIX/lib:$LD_LIBRARY_PATH
 ```
@@ -38,7 +38,7 @@ conda env config vars set LD_LIBRARY_PATH=$CONDA_PREFIX/lib:$LD_LIBRARY_PATH
 
 2. Download the model from Huggingface and convert it to OpenVINO IR via [optimum-intel CLI](https://github.com/huggingface/optimum-intel). Example command for downloading and exporting FP16 model:
 
-`optimum-cli export openvino --model SimianLuo/LCM_Dreamshaper_v7 --convert-tokenizer --weight-format fp16 models/lcm_dreamshaper_v7/FP16`
+`optimum-cli export openvino --model SimianLuo/LCM_Dreamshaper_v7 --weight-format fp16 models/lcm_dreamshaper_v7/FP16`
 
 ### LoRA enabling with safetensors
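With `--convert-tokenizer` removed from the export command, the OpenVINO tokenizer IR has to be produced in a separate step. A minimal sketch, assuming the `convert_tokenizer` tool shipped with openvino-tokenizers and its `-o` output option (exact flags may differ between versions); it writes to the `openvino_tokenizer/` subfolder that the C++ sources in this commit now read from:

```shell
# Hypothetical follow-up to the export above: convert the Hugging Face
# tokenizer saved by optimum-cli into OpenVINO IR inside openvino_tokenizer/.
convert_tokenizer models/lcm_dreamshaper_v7/FP16/tokenizer/ \
    -o models/lcm_dreamshaper_v7/FP16/openvino_tokenizer/
```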
2 changes: 1 addition & 1 deletion image_generation/lcm_dreamshaper_v7/cpp/src/main.cpp
@@ -170,7 +170,7 @@ StableDiffusionModels compile_models(const std::string& model_path,
     // Tokenizer
     {
         // Tokenizer model will be loaded to CPU: OpenVINO Tokenizers can be inferred on a CPU device only.
-        models.tokenizer = core.compile_model(model_path + "/tokenizer/openvino_tokenizer.xml", "CPU");
+        models.tokenizer = core.compile_model(model_path + "/openvino_tokenizer/openvino_tokenizer.xml", "CPU");
     }
 
     return models;
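A quick sanity check that the relocated tokenizer IR is where the updated code looks for it (paths assumed from the README section above):

```shell
# compile_models() now resolves the tokenizer as
# <model_path>/openvino_tokenizer/openvino_tokenizer.xml; confirm it exists.
ls models/lcm_dreamshaper_v7/FP16/openvino_tokenizer/openvino_tokenizer.xml
```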
4 changes: 2 additions & 2 deletions image_generation/stable_diffusion_1_5/cpp/README.md
@@ -18,7 +18,7 @@ Prepare a python environment and install dependencies:
 ```shell
 conda create -n openvino_sd_cpp python==3.10
 conda activate openvino_sd_cpp
-conda install -c conda-forge openvino=2024.1.0 c-compiler cxx-compiler make cmake
+conda install -c conda-forge openvino=2024.1.0 c-compiler cxx-compiler git make cmake
 # Ensure that Conda standard libraries are used
 conda env config vars set LD_LIBRARY_PATH=$CONDA_PREFIX/lib:$LD_LIBRARY_PATH
 ```
@@ -41,7 +41,7 @@ python -m pip install ../../../thirdparty/openvino_tokenizers/[transformers]
 
 Example command for downloading and exporting FP16 model:
 
-`optimum-cli export openvino --model dreamlike-art/dreamlike-anime-1.0 --task stable-diffusion --convert-tokenizer --weight-format fp16 models/dreamlike_anime_1_0_ov/FP16`
+`optimum-cli export openvino --model dreamlike-art/dreamlike-anime-1.0 --task stable-diffusion --weight-format fp16 models/dreamlike_anime_1_0_ov/FP16`
 
 You can also choose other precision and export FP32 or INT8 model.
2 changes: 1 addition & 1 deletion image_generation/stable_diffusion_1_5/cpp/src/main.cpp
@@ -194,7 +194,7 @@ StableDiffusionModels compile_models(const std::string& model_path,
     {
         Timer t("Loading and compiling tokenizer");
         // Tokenizer model will be loaded to CPU: OpenVINO Tokenizers can be inferred on a CPU device only.
-        models.tokenizer = core.compile_model(model_path + "/tokenizer/openvino_tokenizer.xml", "CPU");
+        models.tokenizer = core.compile_model(model_path + "/openvino_tokenizer/openvino_tokenizer.xml", "CPU");
     }
 
     return models;
4 changes: 2 additions & 2 deletions text_generation/causal_lm/cpp/beam_search_causal_lm.cpp
@@ -32,13 +32,13 @@ int main(int argc, char* argv[]) try {
     ov::Core core;
     core.add_extension(OPENVINO_TOKENIZERS_PATH); // OPENVINO_TOKENIZERS_PATH is defined in CMakeLists.txt
     //Read the tokenizer model information from the file to later get the runtime information
-    auto tokenizer_model = core.read_model(std::string{argv[1]} + "/openvino_tokenizer.xml");
+    auto tokenizer_model = core.read_model(std::string{argv[1]} + "/openvino_tokenizer/openvino_tokenizer.xml");
     // tokenizer and detokenizer work on CPU only
     ov::InferRequest tokenizer = core.compile_model(
         tokenizer_model, "CPU").create_infer_request();
     auto [input_ids, attention_mask] = tokenize(tokenizer, argv[2]);
     ov::InferRequest detokenizer = core.compile_model(
-        std::string{argv[1]} + "/openvino_detokenizer.xml", "CPU").create_infer_request();
+        std::string{argv[1]} + "/openvino_tokenizer/openvino_detokenizer.xml", "CPU").create_infer_request();
     // The model can be compiled for GPU as well
     ov::InferRequest lm = core.compile_model(
         std::string{argv[1]} + "/openvino_model.xml", "CPU").create_infer_request();
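A hedged usage sketch for the updated sample: argv[1] is the model folder and argv[2] the prompt; the layout below is inferred from the paths in the code, and the model folder name is only an example. greedy_causal_lm in the next file takes the same two arguments and expects the same layout.

```shell
# Expected folder contents after this commit (inferred from the code above):
#   <model_dir>/openvino_model.xml
#   <model_dir>/openvino_tokenizer/openvino_tokenizer.xml
#   <model_dir>/openvino_tokenizer/openvino_detokenizer.xml
./beam_search_causal_lm ./TinyLlama-1.1B-Chat-v1.0/ "Why is the Sun yellow?"
```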
4 changes: 2 additions & 2 deletions text_generation/causal_lm/cpp/greedy_causal_lm.cpp
@@ -62,13 +62,13 @@ int main(int argc, char* argv[]) try {
     ov::Core core;
     core.add_extension(OPENVINO_TOKENIZERS_PATH); // OPENVINO_TOKENIZERS_PATH is defined in CMakeLists.txt
     //Read the tokenizer model information from the file to later get the runtime information
-    auto tokenizer_model = core.read_model(std::string{argv[1]} + "/openvino_tokenizer.xml");
+    auto tokenizer_model = core.read_model(std::string{argv[1]} + "/openvino_tokenizer/openvino_tokenizer.xml");
     // tokenizer and detokenizer work on CPU only
     ov::InferRequest tokenizer = core.compile_model(
         tokenizer_model, "CPU").create_infer_request();
     auto [input_ids, attention_mask] = tokenize(tokenizer, argv[2]);
     ov::InferRequest detokenizer = core.compile_model(
-        std::string{argv[1]} + "/openvino_detokenizer.xml", "CPU").create_infer_request();
+        std::string{argv[1]} + "/openvino_tokenizer/openvino_detokenizer.xml", "CPU").create_infer_request();
     // The model can be compiled for GPU as well
     ov::InferRequest lm = core.compile_model(
         std::string{argv[1]} + "/openvino_model.xml", "CPU").create_infer_request();
4 changes: 2 additions & 2 deletions text_generation/causal_lm/cpp/speculative_decoding_lm.cpp
@@ -119,10 +119,10 @@ int main(int argc, char* argv[]) try {
     core.add_extension(OPENVINO_TOKENIZERS_PATH); // OPENVINO_TOKENIZERS_PATH is defined in CMakeLists.txt
     // tokenizer and detokenizer work on CPU only
     ov::InferRequest tokenizer = core.compile_model(
-        std::string{argv[1]} + "/openvino_tokenizer.xml", "CPU").create_infer_request();
+        std::string{argv[1]} + "/openvino_tokenizer/openvino_tokenizer.xml", "CPU").create_infer_request();
     auto [draft_input_ids, draft_attention_mask] = tokenize(tokenizer, argv[3]);
     ov::InferRequest detokenizer = core.compile_model(
-        std::string{argv[1]} + "/openvino_detokenizer.xml", "CPU").create_infer_request();
+        std::string{argv[1]} + "/openvino_tokenizer/openvino_detokenizer.xml", "CPU").create_infer_request();
     TextStreamer text_streamer{std::move(detokenizer)};
 
     // draft model
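A similar sketch for the speculative sample, inferred from the code: the tokenizer and detokenizer IRs are read from the first (draft) model folder and argv[3] is the prompt; the folder names here are placeholders.

```shell
# The prompt is tokenized with the draft model's tokenizer (argv[1], argv[3]);
# the main model is passed as the second argument.
./speculative_decoding_lm ./draft_model/ ./main_model/ "Why is the Sun yellow?"
```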
