Commit c42f519

release v0.2
1 parent a1e68b0 commit c42f519

File tree

8 files changed: +139 -19 lines changed


README.md

Lines changed: 1 addition & 0 deletions
@@ -28,6 +28,7 @@
 - **Versatile Applications**: Ready to use as a best-in-class reranker to improve editing outputs, or as a high-fidelity reward signal for **stable and effective Reinforcement Learning (RL) fine-tuning**.
 
 ## 🔥 News
+- **2025-10-27**: Released [OmniGen2-EditScore7B-v1.1](https://huggingface.co/OmniGen2/OmniGen2-EditScore7B-v1.1), achieving a **7.01 GEdit score** within **700 steps** by incorporating the **reweighting strategy** from [TempFlow](https://arxiv.org/abs/2508.04324). We have also improved the JSON-fixing method using [this great library](https://github.com/mangiucugna/json_repair), so EditScore should now be more stable across various conditions. Update with `pip install -U editscore`.
 - **2025-10-22**: **Introducing Our Reinforcement Learning Training Framework!**
   We're excited to release our complete RL pipeline, the result of a massive effort to simplify fine-tuning for image editing models. Key features include:
   - **Ready-to-Use RL Dataset**: Includes the complete dataset used in the EditScore project, along with clear usage guidelines and preparation scripts.

editscore/utils.py

Lines changed: 4 additions & 15 deletions
@@ -4,6 +4,7 @@
 import regex as re
 import ast
 import random
+import json_repair
 
 def fix_json(input_str):
     # Add double quotes around keys using regex
@@ -179,21 +180,21 @@ def fallback_repair_json(input_str: str) -> str:
 
 def robust_json_fix(s: str):
     try:
-        return json.loads(s)
+        return json_repair.loads(s)
     except Exception:
         pass
 
     for fixer in [fix_json, repair_reasoning_field_robust]:
         s = fixer(s)
         try:
-            return json.loads(s)
+            return json_repair.loads(s)
         except Exception:
             print(f"Error: Cannot fix {fixer.__name__} {s=}")
             continue
 
     try:
         repaired_str = fallback_repair_json(s)
-        return json.loads(repaired_str)
+        return json_repair.loads(repaired_str)
     except Exception as e:
         print(f"Error: Cannot fix fallback_repair_json {s=} {e=}")
         return False
@@ -400,18 +401,6 @@ def mllm_output_to_dict(input_string, give_up_parsing=False, text_prompt=None, s
         print(f"Now fixing: {e1=} {json_str=}")
 
         new_data = robust_json_fix(json_str)
-
-        # try:
-        #     new_data = json.loads(fix_json(json_str))
-        #     return new_data
-        # except Exception as e2:
-        #     try:
-        #         print(f"Now fixing: {e2=} {fix_json(json_str)=}")
-        #         new_data = json.loads(repair_reasoning_field_robust(fix_json(json_str)))
-        #         return new_data
-        #     except Exception as e3:
-        #         print(f"Error: Cannot fix {e3=} {repair_reasoning_field_robust(fix_json(json_str))=}")
-        #         return False
         return new_data
     else:
         print("The required delimiters were not found correctly in the string.")
Lines changed: 87 additions & 0 deletions
@@ -0,0 +1,87 @@
+#!/bin/bash
+SHELL_FOLDER=$(cd "$(dirname "$0")";pwd)
+cd $(dirname $SHELL_FOLDER)
+cd ../
+
+experiment_name=omnigen2_edit_rl_4machine_editscore7b_avg8
+step=700
+RANK=0
+WORLD_SIZE=1
+
+while [[ $# -gt 0 ]]; do
+  case "$1" in
+    --experiment_name=*)
+      experiment_name="${1#*=}"
+      shift
+      ;;
+    --step=*)
+      step="${1#*=}"
+      shift
+      ;;
+    --rank=*)
+      RANK="${1#*=}"
+      shift
+      ;;
+    --world_size=*)
+      WORLD_SIZE="${1#*=}"
+      shift
+      ;;
+    *)
+      echo "Unknown parameter: $1"
+      shift
+      ;;
+  esac
+done
+
+# Print configuration
+echo "RANK: $RANK"
+echo "WORLD_SIZE: $WORLD_SIZE"
+
+global_shift_index=0
+total_num_images=606
+
+num_gpus_per_machine=$(python -c "import torch; print(torch.cuda.device_count())")
+# Calculate images per machine, rounding up to ensure all data is covered
+num_images_per_machine=$(( (total_num_images + WORLD_SIZE - 1) / WORLD_SIZE ))
+shift_index=$((RANK * num_images_per_machine))
+
+if [ $((total_num_images - shift_index)) -lt $num_images_per_machine ]; then
+  num_images_per_machine=$((total_num_images - shift_index))
+fi
+
+# Calculate the base number of images per GPU (all but the last GPU)
+num_images_per_gpu=$(( (num_images_per_machine + num_gpus_per_machine - 1) / num_gpus_per_machine ))
+
+text_guidance_scale=5.0
+image_guidance_scale=1.5
+
+for ((i=0; i<num_gpus_per_machine; i++)); do
+  if [ $i -lt $((num_gpus_per_machine - 1)) ]; then
+    # All but the last GPU process equal-sized slices
+    start_idx=$((global_shift_index + i * num_images_per_gpu + shift_index))
+    end_idx=$((start_idx + num_images_per_gpu))
+  else
+    # The last GPU processes the remaining data
+    start_idx=$((global_shift_index + (num_gpus_per_machine - 1) * num_images_per_gpu + shift_index))
+    end_idx=$((global_shift_index + shift_index + num_images_per_machine))
+  fi
+  echo ${start_idx} ${end_idx}
+
+  CUDA_VISIBLE_DEVICES=${i} WORLD_SIZE=1 nohup accelerate launch --num_processes 1 --num_machines 1 \
+    evaluation/GEdit-Bench/inference.py \
+    --load_from_pipeline \
+    --pipeline_path OmniGen2/OmniGen2 \
+    --transformer_lora_path experiments/${experiment_name}/checkpoint-${step}/transformer_lora \
+    --num_inference_step 50 \
+    --height 1024 \
+    --width 1024 \
+    --text_guidance_scale ${text_guidance_scale} \
+    --image_guidance_scale ${image_guidance_scale} \
+    --time_shift_base_res 168 \
+    --negative_prompt "" \
+    --use_ori_neg_prompt_template \
+    --scheduler "euler" \
+    --result_dir evaluation/GEdit-Bench/results/${experiment_name}/results_step${step}_ts${text_guidance_scale}_ig${image_guidance_scale} \
+    --start_index ${start_idx} --end_index ${end_idx} \
+    > logs/gedit_${experiment_name}_step${step}_ts${text_guidance_scale}_ig${image_guidance_scale}_${start_idx}_${end_idx}.log 2>&1 &
+done
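To make the sharding logic above concrete, here is a short Python sketch (a hypothetical helper, not part of the repo) of the same index arithmetic: the 606 GEdit-Bench samples are first split across machines via RANK/WORLD_SIZE with ceiling division, then across the local GPUs, with the last GPU taking whatever remains.

```python
# Hypothetical helper mirroring the index arithmetic of the launcher script above.
def shard_indices(total_num_images=606, rank=0, world_size=1, num_gpus=8, global_shift=0):
    # Ceiling division: images assigned to this machine
    per_machine = (total_num_images + world_size - 1) // world_size
    shift = rank * per_machine
    # The last machine may receive fewer images
    per_machine = min(per_machine, total_num_images - shift)
    # Ceiling division: images per GPU on this machine
    per_gpu = (per_machine + num_gpus - 1) // num_gpus
    slices = []
    for i in range(num_gpus):
        if i < num_gpus - 1:
            start = global_shift + shift + i * per_gpu
            end = start + per_gpu
        else:  # the last GPU picks up the remainder
            start = global_shift + shift + (num_gpus - 1) * per_gpu
            end = global_shift + shift + per_machine
        slices.append((start, end))
    return slices

# Single machine with 8 GPUs: [(0, 76), (76, 152), ..., (532, 606)]
print(shard_indices())
```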
Lines changed: 42 additions & 0 deletions
@@ -0,0 +1,42 @@
+#!/bin/bash
+SHELL_FOLDER=$(cd "$(dirname "$0")";pwd)
+cd $(dirname $SHELL_FOLDER)
+cd ../
+
+source "$(dirname $(which conda))/../etc/profile.d/conda.sh"
+conda activate py3.12+pytorch2.7.1+cu126
+
+experiment_name=omnigen2_edit_rl_4machine_editscore7b_avg8
+step=700
+
+# Parse named arguments
+while [[ $# -gt 0 ]]; do
+  case "$1" in
+    --experiment_name=*)
+      experiment_name="${1#*=}"
+      shift
+      ;;
+    --step=*)
+      step="${1#*=}"
+      shift
+      ;;
+    *)
+      echo "Unknown parameter: $1"
+      shift
+      ;;
+  esac
+done
+
+text_guidance_scale=5.0
+image_guidance_scale=1.5
+
+accelerate launch --num_processes 1 evaluation/GEdit-Bench/test_gedit_score.py \
+  --result_dir evaluation/GEdit-Bench/results/${experiment_name}/results_step${step}_ts${text_guidance_scale}_ig${image_guidance_scale} \
+  --backbone gpt-4.1 \
+  --openai_url https://api.openai.com/v1/chat/completions \
+  --max_workers 30 \
+  --key PUT-YOUR-KEY-HERE
+
+python evaluation/GEdit-Bench/calculate_statistics.py \
+  --result_dir evaluation/GEdit-Bench/results/${experiment_name}/results_step${step}_ts${text_guidance_scale}_ig${image_guidance_scale}/viescore_gpt-4.1 \
+  --language en
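The scoring step above points `test_gedit_score.py` at an OpenAI-compatible endpoint. As a rough sketch of what `--backbone`, `--openai_url`, and `--key` configure (the judge prompt and the script's internals are assumptions, not taken from the repo), each edited result is scored by posting a standard chat-completions request such as:

```python
import requests

def query_judge(prompt: str, api_key: str,
                url: str = "https://api.openai.com/v1/chat/completions",
                model: str = "gpt-4.1") -> str:
    # Standard OpenAI chat-completions payload; the actual GEdit-Bench judge
    # prompt and image handling live inside test_gedit_score.py.
    payload = {"model": model, "messages": [{"role": "user", "content": prompt}]}
    headers = {"Authorization": f"Bearer {api_key}"}
    resp = requests.post(url, json=payload, headers=headers, timeout=120)
    resp.raise_for_status()
    return resp.json()["choices"][0]["message"]["content"]
```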

examples/OmniGen2-RL/options/omnigen2_edit_rl_4machine_editscore7b_avg4.yml

Lines changed: 1 addition & 1 deletion
@@ -105,7 +105,7 @@ train:
   server_type: vlm
   use_ori_neg_prompt_template: true
   time_shift_base_res: 168
-  policy_loss_reweighting: false
+  policy_loss_reweighting: true
 
 val:
   train_visualization_interval: 5

examples/OmniGen2-RL/options/omnigen2_edit_rl_4machine_editscore7b_avg8.yml

Lines changed: 1 addition & 1 deletion
@@ -105,7 +105,7 @@ train:
   server_type: vlm
   use_ori_neg_prompt_template: true
   time_shift_base_res: 168
-  policy_loss_reweighting: false
+  policy_loss_reweighting: true
 
 val:
   train_visualization_interval: 5

examples/OmniGen2-RL/options/omnigen2_edit_rl_single_machine_editscore7b.yml

Lines changed: 1 addition & 1 deletion
@@ -105,7 +105,7 @@ train:
   server_type: vlm
   use_ori_neg_prompt_template: true
   time_shift_base_res: 168
-  policy_loss_reweighting: false
+  policy_loss_reweighting: true
 
 val:
   train_visualization_interval: 5
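All three training configs flip `policy_loss_reweighting` on, enabling the TempFlow-style reweighting mentioned in the README news entry. Purely as an illustration of what such a toggle typically does (the weighting below is a placeholder, not TempFlow's or this repo's actual formula), a reweighted policy loss scales each sample's loss by a timestep-dependent weight before averaging:

```python
import torch

def reweighted_policy_loss(per_sample_loss: torch.Tensor,
                           timesteps: torch.Tensor,
                           reweighting: bool = True) -> torch.Tensor:
    # per_sample_loss: (B,) policy-gradient loss per rollout
    # timesteps: (B,) sampled flow-matching timesteps in [0, 1]
    if reweighting:
        # Placeholder weight emphasising later timesteps; substitute the
        # weighting from the TempFlow paper for real training.
        weights = 1.0 + timesteps
        weights = weights / weights.mean()
        per_sample_loss = per_sample_loss * weights
    return per_sample_loss.mean()
```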

pyproject.toml

Lines changed: 2 additions & 1 deletion
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "editscore"
-version = "0.1.9"
+version = "0.2"
 authors = [
     { name="Xin Luo", email="[email protected]" },
     { name="Jiahao Wang", email="[email protected]" },
@@ -27,6 +27,7 @@ dependencies = [
     "qwen-vl-utils",
     "peft",
     "Pillow",
+    "json-repair"
 ]
 
 [project.urls]
