# gateway.yml
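# CI workflow for the JuiceFS S3 gateway: it runs on pushes and pull requests
# against release-** branches, on a nightly schedule, and on manual dispatch,
# exercising `juicefs gateway` plus `juicefs sync` against a matrix of
# metadata engines.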
name: "gateway-test"
on:
push:
branches:
- release-**
paths-ignore:
- 'docs/**'
- '**.md'
pull_request:
#The branches below must be a subset of the branches above
branches:
- release-**
paths-ignore:
- 'docs/**'
- '**.md'
workflow_dispatch:
inputs:
debug:
type: boolean
description: "Run the build with tmate debugging enabled"
required: false
default: false
schedule:
- cron: '0 0 * * *'
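
# A single job template is fanned out over all the metadata engines below;
# SQL-backed engines (MariaDB, MySQL, PostgreSQL, TiDB) are added through
# `include` entries so that each configured transaction isolation level gets
# its own matrix leg.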
jobs:
  gateway:
    strategy:
      fail-fast: false
      matrix:
        meta: ['sqlite3', 'redis', 'tikv', 'badger', 'etcd', 'fdb']
        file_size: ['100M']
        isolation_level: ['']
        include:
          - meta: 'mariadb'
            file_size: '100M'
            isolation_level: "read committed"
          - meta: 'mysql'
            file_size: '100M'
            isolation_level: "read committed"
          - meta: 'mysql'
            file_size: '100M'
            isolation_level: "repeatable read"
          - meta: 'mysql'
            file_size: '100M'
            isolation_level: "serializable"
          - meta: 'postgres'
            file_size: '100M'
            isolation_level: "read committed"
          - meta: 'postgres'
            file_size: '100M'
            isolation_level: "repeatable read"
          - meta: 'postgres'
            file_size: '100M'
            isolation_level: "serializable"
          - meta: 'tidb'
            file_size: '100M'
            isolation_level: "read committed"
          - meta: 'tidb'
            file_size: '100M'
            isolation_level: "repeatable read"
    runs-on: ubuntu-20.04
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 1

      - name: Set Variable
        id: vars
        run: |
          if [ "${{matrix.meta}}" == "fdb" ]; then
            echo "target=juicefs.fdb" >> $GITHUB_OUTPUT
          else
            echo "target=juicefs" >> $GITHUB_OUTPUT
          fi
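
      # Build the binary with the repository's composite build action; the
      # FoundationDB-enabled target (juicefs.fdb) chosen above is used only
      # when matrix.meta is fdb.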
      - name: Build
        uses: ./.github/actions/build
        with:
          target: ${{steps.vars.outputs.target}}

      - name: Start meta
        run: |
          sudo chmod +x .github/scripts/start_meta_engine.sh
          source .github/scripts/start_meta_engine.sh
          start_meta_engine ${{matrix.meta}}
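
      # The MinIO client is only needed by the (currently commented-out)
      # verification commands in the sync step below.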
      - name: Install tools
        run: |
          wget -q https://dl.minio.io/client/mc/release/linux-amd64/mc
          chmod +x mc
        shell: bash
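
      # Format a fresh volume on the chosen metadata engine and start the S3
      # gateway on localhost:8080 with the default minioadmin credentials.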
      - name: Start gateway
        shell: bash
        run: |
          source .github/scripts/start_meta_engine.sh
          meta_url=$(get_meta_url ${{matrix.meta}})
          create_database $meta_url ${{matrix.isolation_level}}
          mp="/tmp/myjfs"
          volume="myjfs"
          export MINIO_ROOT_USER=minioadmin
          export MINIO_ROOT_PASSWORD=minioadmin
          ./juicefs format $meta_url $volume --trash-days 0
          ./juicefs gateway $meta_url localhost:8080 --no-usage-report --access-log /tmp/access1.log &
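
      # Run three `juicefs sync` processes concurrently against the gateway,
      # then mount the volume directly and diff it against the source data to
      # make sure the concurrent uploads produced a consistent result.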
      - name: Sync with multiple processes
        shell: bash
        run: |
          source .github/scripts/start_meta_engine.sh
          meta_url=$(get_meta_url ${{matrix.meta}})
          file_size=${{matrix.file_size}}
          if [ "$file_size" == "100M" ]; then
            file_count=5
          else
            file_count=2000
          fi
          threads=20
          mp=/tmp/myjfs
          volume=myjfs
          dd if=/dev/urandom of=file iflag=fullblock,count_bytes bs=4k count="$file_size" > /dev/null
          mkdir data
          for i in $(seq 1 $file_count); do
            cp file data/file$i
          done
          start=$(date +%s)
          declare -a pids
          ./juicefs sync --dirs data/ s3://minioadmin:minioadmin@localhost:8080/$volume/data/ --no-https -p $threads &
          pids+=($!)
          ./juicefs sync --dirs data/ s3://minioadmin:minioadmin@localhost:8080/$volume/data/ --no-https -p $threads &
          pids+=($!)
          ./juicefs sync --dirs data/ s3://minioadmin:minioadmin@localhost:8080/$volume/data/ --no-https -p $threads &
          pids+=($!)
          wait "${pids[@]}"
          rm -rf $HOME/.juicefs/cache/ || true
          # ./mc alias set minio http://localhost:9000 minioadmin minioadmin --api S3v4
          # ./mc mb minio/$volume
          # ./mc cp --recursive data/ minio/$volume/data
          end=$(date +%s)
          time=$((end-start))
          echo "time cost is: $time seconds"
          killall juicefs
          sleep 3
          ./juicefs mount -d $meta_url $mp --no-usage-report
          diff -ur data/ $mp/data/
          echo "diff succeeded"
          ./juicefs umount $mp --force
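
      # Populate the volume with `juicefs mdtest`, then sync an empty directory
      # with --delete-dst from three concurrent processes; after remounting,
      # the test tree must be gone.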
      - name: Sync with empty dir
        shell: bash
        run: |
          source .github/scripts/start_meta_engine.sh
          meta_url=$(get_meta_url ${{matrix.meta}})
          threads=20
          mp=/tmp/myjfs
          volume=myjfs
          export MINIO_ROOT_USER=minioadmin
          export MINIO_ROOT_PASSWORD=minioadmin
          ./juicefs mdtest $meta_url test --dirs 10 --depth 2 --files 10 --threads 10 --no-usage-report
          ./juicefs gateway $meta_url localhost:8080 --access-log /tmp/access1.log &
          sleep 10
          mkdir emptydir
          declare -a pids
          ./juicefs sync emptydir/ s3://minioadmin:minioadmin@localhost:8080/$volume/test/ --delete-dst --no-https -p $threads &
          pids+=($!)
          ./juicefs sync emptydir/ s3://minioadmin:minioadmin@localhost:8080/$volume/test/ --delete-dst --no-https -p $threads &
          pids+=($!)
          ./juicefs sync emptydir/ s3://minioadmin:minioadmin@localhost:8080/$volume/test/ --delete-dst --no-https -p $threads &
          pids+=($!)
          wait "${pids[@]}"
          killall juicefs
          sleep 3
          ./juicefs mount -d $meta_url $mp --no-usage-report
          [ -d "$mp/test/" ] && exit 1
          ./juicefs umount $mp --force
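
      # Always dump the tail of the client log and fail the job if any
      # <FATAL> entries were recorded.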
      - name: Log
        if: ${{ always() }}
        shell: bash
        run: |
          if [ -f ~/.juicefs/juicefs.log ]; then
            tail -300 ~/.juicefs/juicefs.log
            grep "<FATAL>:" ~/.juicefs/juicefs.log && exit 1 || true
          fi
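
      # Open an upterm debug session on failure, but only for manual runs
      # started with the debug input enabled.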
      - name: Setup upterm session
        if: ${{ failure() && github.event_name == 'workflow_dispatch' && github.event.inputs.debug == 'true' }}
        timeout-minutes: 1
        uses: lhotari/action-upterm@v1
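
  # Aggregate job: fails when any matrix leg failed and, except for manual
  # runs, sends a Slack notification about the failure.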
  success-all-test:
    runs-on: ubuntu-latest
    needs: [gateway]
    if: always()
    steps:
      - uses: technote-space/workflow-conclusion-action@v3
      - uses: actions/checkout@v3
      - name: Check Failure
        if: env.WORKFLOW_CONCLUSION == 'failure'
        run: exit 1
      - name: Send Slack Notification
        if: ${{ failure() && github.event_name != 'workflow_dispatch' }}
        uses: juicedata/slack-notify-action@main
        with:
          channel-id: "${{ secrets.SLACK_CHANNEL_ID_FOR_PR_CHECK_NOTIFY }}"
          slack_bot_token: "${{ secrets.SLACK_BOT_TOKEN }}"
      - name: Success
        if: ${{ success() }}
        run: echo "All Done"