demo.py
"""Caption an image with BLIP (ViT-base, CapFilt-L checkpoint)."""
import os.path

from PIL import Image
# import requests
import torch
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from IPython.display import display  # used to preview the image inline (IPython/Jupyter)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f"Device : {device}")


def load_demo_image(img_path, device, image_size=384):
    # The original BLIP demo fetched the image from a URL instead:
    # img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
    # raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
    raw_image = Image.open(img_path).convert('RGB')  # convert('RGB') guarantees 3 channels for Normalize
    w, h = raw_image.size
    display(raw_image.resize((w // 5, h // 5)))  # show a downscaled preview

    # Preprocessing: bicubic resize to the model's input size, tensorize,
    # then normalize with CLIP's mean/std.
    transform = transforms.Compose([
        transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))
    ])
    image = transform(raw_image).unsqueeze(0).to(device)  # add a batch dimension
    return image


def main_blip(img_path):
    from models.blip import blip_decoder

    image_size = 384
    image = load_demo_image(img_path=img_path, device=device)

    model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
    model = blip_decoder(pretrained=model_url, image_size=image_size, vit='base')
    model.eval()
    model = model.to(device)

    with torch.no_grad():
        # beam search
        caption = model.generate(image, sample=False, num_beams=3, max_length=20, min_length=5)
        # nucleus sampling
        # caption = model.generate(image, sample=True, top_p=0.9, max_length=20, min_length=5)

    print(f'Caption for {os.path.basename(img_path)} : {caption[0]}')
    return caption[0]
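
# Minimal usage sketch (not part of the original file): 'demo.jpg' is a
# hypothetical placeholder path; point it at a real image before running.
if __name__ == '__main__':
    main_blip('demo.jpg')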