inference.py
import torch

from omni_model.omni_space import *

# Load the large pretrained model, move it to the GPU, and switch to eval mode.
a = OmniBind_Large(pretrained=True)
a = a.cuda()
a = a.eval()

with torch.no_grad():
    # Embed each modality into the shared representation space.
    aud = a.emb_audios(['assets/train.wav', 'assets/toilet.wav'])
    img = a.emb_images(['assets/train.jpeg', 'assets/toilet.jpeg'])
    txt = a.emb_texts(['a photo of train', 'a photo of toilet'])
    pc = a.emb_points(['assets/train.npy', 'assets/toilet.npy'])

print(aud.shape, img.shape, txt.shape, pc.shape)
# Pairwise cross-modal similarity matrices (dot products between embeddings).
print(aud @ img.T)
print(aud @ txt.T)
print(aud @ pc.T)
print(img @ txt.T)
print(img @ pc.T)
print(txt @ pc.T)
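
# A minimal retrieval sketch (added illustration, not part of the original
# script): assuming the embeddings are comparable via dot products as above,
# cross-modal retrieval reduces to an argmax over each row of a similarity
# matrix.
pred = (aud @ img.T).argmax(dim=1)  # best-matching image index per audio clip
print(pred)  # tensor([0, 1]) expected if the train/toilet pairs line up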
# Equivalent check with the smaller model (kept commented out):
# a = OmniBind_Base()
# # a = a.cuda()
# with torch.no_grad():
#     aud = a.emb_audios(['assets/train.wav', 'assets/toilet.wav'])
#     img = a.emb_images(['assets/train.jpeg', 'assets/toilet.jpeg'])
#     txt = a.emb_texts(['a photo of train', 'a photo of toilet'])
#     pc = a.emb_points(['assets/train.npy', 'assets/toilet.npy'])
# print(aud.shape, img.shape, txt.shape, pc.shape)
# print(aud @ img.T)
# print(aud @ txt.T)
# print(aud @ pc.T)
# print(img @ txt.T)
# print(img @ pc.T)
# print(txt @ pc.T)
# print('inf passed')
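
# A further hypothetical sketch: assuming the shared-space embeddings behave
# like CLIP features, a temperature-scaled softmax turns similarities into
# matching probabilities. The 100.0 scale mirrors CLIP's logit scale and is
# an assumption here, not a value taken from this repository.
with torch.no_grad():
    probs = (txt @ pc.T * 100.0).softmax(dim=-1)
    print(probs)  # each row should peak at the matching point cloud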