utils.py
"""
Author: Haoran Chen
Date: 2022.08.15
"""
import os
from clip import clip
import torch
from torch import nn
from torchvision import datasets
import tqdm
import numpy as np
def target_text(target_path):
    """Build "A photo of a <class>" text prompts from the class folders in target_path."""
    target_classes = os.listdir(target_path)
    target_classes = [name.replace("_", " ") for name in target_classes]
    target_classes.sort()
    return ["A photo of a " + name for name in target_classes]
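# Usage sketch (path and class name are hypothetical): target_path is expected
# to contain one sub-directory per class, e.g. "back_pack/" becomes the prompt
# "A photo of a back pack".
#
#   prompts = target_text("/data/office31/amazon/images")
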
def Prompt(classnames, clip_model, prompt, args):
    """Splice learnable context vectors into CLIP's token embeddings.

    The args.M1 + args.M2 "X" placeholders reserve token positions that are
    then replaced by the learnable `prompt` vectors.
    """
    dtype = torch.float32
    prompt_prefix = " ".join(["X"] * (args.M1 + args.M2))
    classnames = [name.replace("_", " ") for name in classnames]
    prompts = [prompt_prefix + " " + name + "." for name in classnames]
    tokenized_prompts = torch.cat([clip.tokenize(p) for p in prompts]).to(args.device)
    with torch.no_grad():
        embedding = clip_model.token_embedding(tokenized_prompts).type(dtype)
    prefix = embedding[:, :1, :]                      # SOS token
    suffix = embedding[:, 1 + args.M1 + args.M2:, :]  # class name, ".", EOS, padding
    source_prompts = torch.cat(
        [prefix,   # (n_cls, 1, dim)
         prompt,   # (n_cls, M1 + M2, dim)
         suffix,   # (n_cls, *, dim)
         ],
        dim=1)
    return source_prompts, tokenized_prompts
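# Minimal usage sketch (values illustrative; assumes the OpenAI CLIP package
# with downloadable weights). `ctx` stands in for the learnable context that
# the training code would normally hold as an nn.Parameter of shape
# (n_cls, M1 + M2, dim):
#
#   from types import SimpleNamespace
#   model, _ = clip.load("ViT-B/16", device="cpu")
#   args = SimpleNamespace(M1=8, M2=8, device="cpu")
#   ctx = torch.zeros(2, args.M1 + args.M2, 512)
#   src, tok = Prompt(["office chair", "monitor"], model, ctx, args)
#   # src: (2, 77, 512) embeddings with ctx spliced in right after the SOS token
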
def l1(logits_list):
    """Mean pairwise L1 distance between the softmax distributions of all logits.

    Note: pops from logits_list, so the caller's list is consumed in place.
    """
    l1_loss = 0
    while len(logits_list) > 1:
        logits1 = logits_list.pop()
        for logits2 in logits_list:  # compare against every remaining entry
            l1_loss += torch.mean(
                torch.abs(torch.nn.functional.softmax(logits1, dim=-1)
                          - torch.nn.functional.softmax(logits2, dim=-1)))
    return l1_loss
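# Usage sketch (tensors illustrative): given logits from several classifier
# heads, l1 sums the mean |softmax_i - softmax_j| over every unordered pair.
#
#   a, b, c = torch.randn(4, 10), torch.randn(4, 10), torch.randn(4, 10)
#   reg = l1([a, b, c])  # note: the list is consumed in place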