From 9ef93fccade1f2b6349c2eec2188d0ea54329f84 Mon Sep 17 00:00:00 2001
From: Lucain
Date: Thu, 6 Jun 2024 13:05:59 +0200
Subject: [PATCH] Switch from `cached_download` to `hf_hub_download` in remaining occurrences (#31284)

Switch from hf_hub_url to hf_hub_download in remaining occurrences

---
 docs/source/en/tasks/semantic_segmentation.md                | 5 +++--
 docs/source/ja/tasks/semantic_segmentation.md                | 5 +++--
 docs/source/ja/tasks/sequence_classification.md              | 5 +++--
 docs/source/ko/tasks/semantic_segmentation.md                | 5 +++--
 .../convert_cvt_original_pytorch_checkpoint_to_pytorch.py    | 5 +++--
 .../deformable_detr/convert_deformable_detr_to_pytorch.py    | 4 ++--
 .../models/deprecated/deta/convert_deta_resnet_to_pytorch.py | 4 ++--
 .../models/deprecated/deta/convert_deta_swin_to_pytorch.py   | 4 ++--
 src/transformers/models/dpt/convert_dpt_hybrid_to_pytorch.py | 4 ++--
 src/transformers/models/dpt/convert_dpt_to_pytorch.py        | 4 ++--
 .../models/regnet/convert_regnet_seer_10b_to_pytorch.py      | 4 ++--
 src/transformers/models/regnet/convert_regnet_to_pytorch.py  | 4 ++--
 12 files changed, 29 insertions(+), 24 deletions(-)

diff --git a/docs/source/en/tasks/semantic_segmentation.md b/docs/source/en/tasks/semantic_segmentation.md
index a354b1d818902b..974b09b7a347d9 100644
--- a/docs/source/en/tasks/semantic_segmentation.md
+++ b/docs/source/en/tasks/semantic_segmentation.md
@@ -245,11 +245,12 @@ You'll also want to create a dictionary that maps a label id to a label class wh
 
 ```py
 >>> import json
->>> from huggingface_hub import cached_download, hf_hub_url
+>>> from pathlib import Path
+>>> from huggingface_hub import hf_hub_download
 
 >>> repo_id = "huggingface/label-files"
 >>> filename = "ade20k-id2label.json"
->>> id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
+>>> id2label = json.loads(Path(hf_hub_download(repo_id, filename, repo_type="dataset")).read_text())
 >>> id2label = {int(k): v for k, v in id2label.items()}
 >>> label2id = {v: k for k, v in id2label.items()}
 >>> num_labels = len(id2label)
diff --git a/docs/source/ja/tasks/semantic_segmentation.md b/docs/source/ja/tasks/semantic_segmentation.md
index 56fb47d52f7e37..531e5f9763d9cd 100644
--- a/docs/source/ja/tasks/semantic_segmentation.md
+++ b/docs/source/ja/tasks/semantic_segmentation.md
@@ -83,11 +83,12 @@ pip install -q datasets transformers evaluate
 
 ```py
 >>> import json
->>> from huggingface_hub import cached_download, hf_hub_url
+>>> from pathlib import Path
+>>> from huggingface_hub import hf_hub_download
 
 >>> repo_id = "huggingface/label-files"
 >>> filename = "ade20k-id2label.json"
->>> id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
+>>> id2label = json.loads(Path(hf_hub_download(repo_id, filename, repo_type="dataset")).read_text())
 >>> id2label = {int(k): v for k, v in id2label.items()}
 >>> label2id = {v: k for k, v in id2label.items()}
 >>> num_labels = len(id2label)
diff --git a/docs/source/ja/tasks/sequence_classification.md b/docs/source/ja/tasks/sequence_classification.md
index 4c2a70ab8a303d..87e721df13382f 100644
--- a/docs/source/ja/tasks/sequence_classification.md
+++ b/docs/source/ja/tasks/sequence_classification.md
@@ -83,11 +83,12 @@ pip install -q datasets transformers evaluate
 
 ```py
 >>> import json
->>> from huggingface_hub import cached_download, hf_hub_url
+>>> from pathlib import Path
+>>> from huggingface_hub import hf_hub_download
 
 >>> repo_id = "huggingface/label-files"
 >>> filename = "ade20k-id2label.json"
->>> id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
+>>> id2label = json.loads(Path(hf_hub_download(repo_id, filename, repo_type="dataset")).read_text())
 >>> id2label = {int(k): v for k, v in id2label.items()}
 >>> label2id = {v: k for k, v in id2label.items()}
 >>> num_labels = len(id2label)
diff --git a/docs/source/ko/tasks/semantic_segmentation.md b/docs/source/ko/tasks/semantic_segmentation.md
index 8a5e20228d608f..212c30a6cd7d06 100644
--- a/docs/source/ko/tasks/semantic_segmentation.md
+++ b/docs/source/ko/tasks/semantic_segmentation.md
@@ -82,11 +82,12 @@ pip install -q datasets transformers evaluate
 
 ```py
 >>> import json
->>> from huggingface_hub import cached_download, hf_hub_url
+>>> from pathlib import Path
+>>> from huggingface_hub import hf_hub_download
 
 >>> repo_id = "huggingface/label-files"
 >>> filename = "ade20k-id2label.json"
->>> id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
+>>> id2label = json.loads(Path(hf_hub_download(repo_id, filename, repo_type="dataset")).read_text())
 >>> id2label = {int(k): v for k, v in id2label.items()}
 >>> label2id = {v: k for k, v in id2label.items()}
 >>> num_labels = len(id2label)
diff --git a/src/transformers/models/cvt/convert_cvt_original_pytorch_checkpoint_to_pytorch.py b/src/transformers/models/cvt/convert_cvt_original_pytorch_checkpoint_to_pytorch.py
index f01436514007a5..9f76c92887f42e 100644
--- a/src/transformers/models/cvt/convert_cvt_original_pytorch_checkpoint_to_pytorch.py
+++ b/src/transformers/models/cvt/convert_cvt_original_pytorch_checkpoint_to_pytorch.py
@@ -19,9 +19,10 @@
 import argparse
 import json
 from collections import OrderedDict
+from pathlib import Path
 
 import torch
-from huggingface_hub import cached_download, hf_hub_url
+from huggingface_hub import hf_hub_download
 
 from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
 
@@ -283,7 +284,7 @@ def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_fo
 
     repo_id = "huggingface/label-files"
     num_labels = num_labels
-    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
+    id2label = json.loads(Path(hf_hub_download(repo_id, img_labels_file, repo_type="dataset")).read_text())
     id2label = {int(k): v for k, v in id2label.items()}
 
     id2label = id2label
diff --git a/src/transformers/models/deformable_detr/convert_deformable_detr_to_pytorch.py b/src/transformers/models/deformable_detr/convert_deformable_detr_to_pytorch.py
index b637ba6d84bb02..781b823e96f375 100644
--- a/src/transformers/models/deformable_detr/convert_deformable_detr_to_pytorch.py
+++ b/src/transformers/models/deformable_detr/convert_deformable_detr_to_pytorch.py
@@ -20,7 +20,7 @@
 
 import requests
 import torch
-from huggingface_hub import cached_download, hf_hub_url
+from huggingface_hub import hf_hub_download
 from PIL import Image
 
 from transformers import DeformableDetrConfig, DeformableDetrForObjectDetection, DeformableDetrImageProcessor
@@ -109,7 +109,7 @@ def convert_deformable_detr_checkpoint(
     config.num_labels = 91
     repo_id = "huggingface/label-files"
     filename = "coco-detection-id2label.json"
-    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
+    id2label = json.loads(Path(hf_hub_download(repo_id, filename, repo_type="dataset")).read_text())
     id2label = {int(k): v for k, v in id2label.items()}
     config.id2label = id2label
     config.label2id = {v: k for k, v in id2label.items()}
diff --git a/src/transformers/models/deprecated/deta/convert_deta_resnet_to_pytorch.py b/src/transformers/models/deprecated/deta/convert_deta_resnet_to_pytorch.py
index 870c56f838c290..60e93efe7c60b0 100644
--- a/src/transformers/models/deprecated/deta/convert_deta_resnet_to_pytorch.py
+++ b/src/transformers/models/deprecated/deta/convert_deta_resnet_to_pytorch.py
@@ -22,7 +22,7 @@
 
 import requests
 import torch
-from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
+from huggingface_hub import hf_hub_download
 from PIL import Image
 
 from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor
@@ -48,7 +48,7 @@ def get_deta_config():
     config.num_labels = 91
     repo_id = "huggingface/label-files"
     filename = "coco-detection-id2label.json"
-    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
+    id2label = json.loads(Path(hf_hub_download(repo_id, filename, repo_type="dataset")).read_text())
     id2label = {int(k): v for k, v in id2label.items()}
     config.id2label = id2label
     config.label2id = {v: k for k, v in id2label.items()}
diff --git a/src/transformers/models/deprecated/deta/convert_deta_swin_to_pytorch.py b/src/transformers/models/deprecated/deta/convert_deta_swin_to_pytorch.py
index 67052edce1b1e5..392750fa67a180 100644
--- a/src/transformers/models/deprecated/deta/convert_deta_swin_to_pytorch.py
+++ b/src/transformers/models/deprecated/deta/convert_deta_swin_to_pytorch.py
@@ -22,7 +22,7 @@
 
 import requests
 import torch
-from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
+from huggingface_hub import hf_hub_download
 from PIL import Image
 
 from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
@@ -63,7 +63,7 @@ def get_deta_config(model_name):
         filename = "coco-detection-id2label.json"
 
     config.num_labels = num_labels
-    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
+    id2label = json.loads(Path(hf_hub_download(repo_id, filename, repo_type="dataset")).read_text())
     id2label = {int(k): v for k, v in id2label.items()}
     config.id2label = id2label
     config.label2id = {v: k for k, v in id2label.items()}
diff --git a/src/transformers/models/dpt/convert_dpt_hybrid_to_pytorch.py b/src/transformers/models/dpt/convert_dpt_hybrid_to_pytorch.py
index 1304acaafcaab2..a407a67f3813ed 100644
--- a/src/transformers/models/dpt/convert_dpt_hybrid_to_pytorch.py
+++ b/src/transformers/models/dpt/convert_dpt_hybrid_to_pytorch.py
@@ -20,7 +20,7 @@
 
 import requests
 import torch
-from huggingface_hub import cached_download, hf_hub_url
+from huggingface_hub import hf_hub_download
 from PIL import Image
 
 from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
@@ -61,7 +61,7 @@ def get_dpt_config(checkpoint_url):
         config.patch_size = 16
         repo_id = "huggingface/label-files"
         filename = "ade20k-id2label.json"
-        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
+        id2label = json.loads(Path(hf_hub_download(repo_id, filename, repo_type="dataset")).read_text())
         id2label = {int(k): v for k, v in id2label.items()}
         config.id2label = id2label
         config.label2id = {v: k for k, v in id2label.items()}
diff --git a/src/transformers/models/dpt/convert_dpt_to_pytorch.py b/src/transformers/models/dpt/convert_dpt_to_pytorch.py
index b55c96f0c702e0..489da9acd19c68 100644
--- a/src/transformers/models/dpt/convert_dpt_to_pytorch.py
+++ b/src/transformers/models/dpt/convert_dpt_to_pytorch.py
@@ -20,7 +20,7 @@
 
 import requests
 import torch
-from huggingface_hub import cached_download, hf_hub_url
+from huggingface_hub import hf_hub_download
 from PIL import Image
 
 from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
@@ -49,7 +49,7 @@ def get_dpt_config(checkpoint_url):
         config.num_labels = 150
         repo_id = "huggingface/label-files"
         filename = "ade20k-id2label.json"
-        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
+        id2label = json.loads(Path(hf_hub_download(repo_id, filename, repo_type="dataset")).read_text())
         id2label = {int(k): v for k, v in id2label.items()}
         config.id2label = id2label
         config.label2id = {v: k for k, v in id2label.items()}
diff --git a/src/transformers/models/regnet/convert_regnet_seer_10b_to_pytorch.py b/src/transformers/models/regnet/convert_regnet_seer_10b_to_pytorch.py
index 93a516fb3c7747..a06b2e830de0fb 100644
--- a/src/transformers/models/regnet/convert_regnet_seer_10b_to_pytorch.py
+++ b/src/transformers/models/regnet/convert_regnet_seer_10b_to_pytorch.py
@@ -30,7 +30,7 @@
 import torch
 import torch.nn as nn
 from classy_vision.models.regnet import RegNet, RegNetParams
-from huggingface_hub import cached_download, hf_hub_url
+from huggingface_hub import hf_hub_download
 from torch import Tensor
 from vissl.models.model_helpers import get_trunk_forward_outputs
 
@@ -165,7 +165,7 @@ def convert_weights_and_push(save_directory: Path, model_name: str = None, push_
 
     repo_id = "huggingface/label-files"
     num_labels = num_labels
-    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
+    id2label = json.loads(Path(hf_hub_download(repo_id, filename, repo_type="dataset")).read_text())
     id2label = {int(k): v for k, v in id2label.items()}
 
     id2label = id2label
diff --git a/src/transformers/models/regnet/convert_regnet_to_pytorch.py b/src/transformers/models/regnet/convert_regnet_to_pytorch.py
index 1f89b7bf8cc04c..38158b682cb557 100644
--- a/src/transformers/models/regnet/convert_regnet_to_pytorch.py
+++ b/src/transformers/models/regnet/convert_regnet_to_pytorch.py
@@ -25,7 +25,7 @@
 import torch
 import torch.nn as nn
 from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
-from huggingface_hub import cached_download, hf_hub_url
+from huggingface_hub import hf_hub_download
 from torch import Tensor
 from vissl.models.model_helpers import get_trunk_forward_outputs
 
@@ -225,7 +225,7 @@ def convert_weights_and_push(save_directory: Path, model_name: str = None, push_
 
     repo_id = "huggingface/label-files"
    num_labels = num_labels
-    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
+    id2label = json.loads(Path(hf_hub_download(repo_id, filename, repo_type="dataset")).read_text())
     id2label = {int(k): v for k, v in id2label.items()}
 
     id2label = id2label
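
For context on the pattern this patch standardizes on: the old code built a raw URL with `hf_hub_url` and fetched it through the deprecated `cached_download`, while the new code lets `hf_hub_download` resolve, download, and cache the file in one call, returning a local path. A minimal standalone sketch (reusing the `repo_id` and `filename` from the docs hunks above):

```py
# Sketch of the replacement pattern: hf_hub_download fetches the file into the
# local cache and returns its path; the JSON mapping is then read from disk.
import json
from pathlib import Path

from huggingface_hub import hf_hub_download

repo_id = "huggingface/label-files"
filename = "ade20k-id2label.json"

label_file = Path(hf_hub_download(repo_id, filename, repo_type="dataset"))
id2label = {int(k): v for k, v in json.loads(label_file.read_text()).items()}
label2id = {v: k for k, v in id2label.items()}
```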