cranky-coder08 committed on
Commit 783a8bf · verified · 1 Parent(s): 15804d9

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.

Files changed (50):
  1. .gitattributes +3 -0
  2. merged_tinyllama_logger/tokenizer.model +3 -0
  3. model_output/incremental_1_logs/checkpoint-575/rng_state.pth +3 -0
  4. model_output/incremental_1_logs/checkpoint-575/scheduler.pt +3 -0
  5. model_output/incremental_1_logs/tokenizer.model +3 -0
  6. model_output/incremental_1_logs/training_args.bin +3 -0
  7. phivenv/Lib/site-packages/huggingface_hub/cli/__pycache__/__init__.cpython-39.pyc +0 -0
  8. phivenv/Lib/site-packages/huggingface_hub/cli/__pycache__/_cli_utils.cpython-39.pyc +0 -0
  9. phivenv/Lib/site-packages/huggingface_hub/cli/__pycache__/upload.cpython-39.pyc +0 -0
  10. phivenv/Lib/site-packages/huggingface_hub/cli/__pycache__/upload_large_folder.cpython-39.pyc +0 -0
  11. phivenv/Lib/site-packages/huggingface_hub/commands/__init__.py +27 -0
  12. phivenv/Lib/site-packages/huggingface_hub/commands/__pycache__/__init__.cpython-39.pyc +0 -0
  13. phivenv/Lib/site-packages/huggingface_hub/commands/__pycache__/_cli_utils.cpython-39.pyc +0 -0
  14. phivenv/Lib/site-packages/huggingface_hub/commands/__pycache__/delete_cache.cpython-39.pyc +0 -0
  15. phivenv/Lib/site-packages/huggingface_hub/commands/__pycache__/download.cpython-39.pyc +0 -0
  16. phivenv/Lib/site-packages/huggingface_hub/commands/__pycache__/env.cpython-39.pyc +0 -0
  17. phivenv/Lib/site-packages/huggingface_hub/commands/__pycache__/huggingface_cli.cpython-39.pyc +0 -0
  18. phivenv/Lib/site-packages/huggingface_hub/commands/__pycache__/lfs.cpython-39.pyc +0 -0
  19. phivenv/Lib/site-packages/huggingface_hub/commands/__pycache__/repo.cpython-39.pyc +0 -0
  20. phivenv/Lib/site-packages/huggingface_hub/commands/__pycache__/repo_files.cpython-39.pyc +0 -0
  21. phivenv/Lib/site-packages/huggingface_hub/commands/__pycache__/scan_cache.cpython-39.pyc +0 -0
  22. phivenv/Lib/site-packages/huggingface_hub/commands/__pycache__/tag.cpython-39.pyc +0 -0
  23. phivenv/Lib/site-packages/huggingface_hub/commands/__pycache__/upload.cpython-39.pyc +0 -0
  24. phivenv/Lib/site-packages/huggingface_hub/commands/__pycache__/upload_large_folder.cpython-39.pyc +0 -0
  25. phivenv/Lib/site-packages/huggingface_hub/commands/__pycache__/user.cpython-39.pyc +0 -0
  26. phivenv/Lib/site-packages/huggingface_hub/commands/__pycache__/version.cpython-39.pyc +0 -0
  27. phivenv/Lib/site-packages/huggingface_hub/commands/_cli_utils.py +74 -0
  28. phivenv/Lib/site-packages/huggingface_hub/commands/delete_cache.py +476 -0
  29. phivenv/Lib/site-packages/huggingface_hub/commands/download.py +204 -0
  30. phivenv/Lib/site-packages/huggingface_hub/commands/env.py +39 -0
  31. phivenv/Lib/site-packages/huggingface_hub/commands/huggingface_cli.py +65 -0
  32. phivenv/Lib/site-packages/huggingface_hub/commands/lfs.py +200 -0
  33. phivenv/Lib/site-packages/huggingface_hub/commands/repo.py +151 -0
  34. phivenv/Lib/site-packages/huggingface_hub/commands/repo_files.py +132 -0
  35. phivenv/Lib/site-packages/huggingface_hub/commands/scan_cache.py +183 -0
  36. phivenv/Lib/site-packages/huggingface_hub/commands/tag.py +161 -0
  37. phivenv/Lib/site-packages/huggingface_hub/commands/upload.py +318 -0
  38. phivenv/Lib/site-packages/huggingface_hub/commands/upload_large_folder.py +131 -0
  39. phivenv/Lib/site-packages/huggingface_hub/commands/user.py +208 -0
  40. phivenv/Lib/site-packages/huggingface_hub/commands/version.py +40 -0
  41. phivenv/Lib/site-packages/huggingface_hub/inference/__init__.py +0 -0
  42. phivenv/Lib/site-packages/huggingface_hub/inference/__pycache__/__init__.cpython-39.pyc +0 -0
  43. phivenv/Lib/site-packages/huggingface_hub/inference/__pycache__/_common.cpython-39.pyc +0 -0
  44. phivenv/Lib/site-packages/huggingface_hub/inference/_client.py +0 -0
  45. phivenv/Lib/site-packages/huggingface_hub/inference/_common.py +457 -0
  46. phivenv/Lib/site-packages/huggingface_hub/inference/_generated/__init__.py +0 -0
  47. phivenv/Lib/site-packages/huggingface_hub/inference/_generated/__pycache__/__init__.cpython-39.pyc +0 -0
  48. phivenv/Lib/site-packages/huggingface_hub/inference/_generated/_async_client.py +0 -0
  49. phivenv/Lib/site-packages/huggingface_hub/inference/_generated/types/__init__.py +192 -0
  50. phivenv/Lib/site-packages/huggingface_hub/inference/_generated/types/__pycache__/__init__.cpython-39.pyc +0 -0
.gitattributes CHANGED
@@ -3,3 +3,6 @@
  *.h5 filter=lfs diff=lfs merge=lfs -text
  *.pth filter=lfs diff=lfs merge=lfs -text
  *.tar filter=lfs diff=lfs merge=lfs -text
+ model_output/incremental_1_logs/tokenizer.model filter=lfs diff=lfs merge=lfs -text
+ merged_tinyllama_logger/tokenizer.model filter=lfs diff=lfs merge=lfs -text
+ model_output/incremental_1_logs/checkpoint-575/scheduler.pt filter=lfs diff=lfs merge=lfs -text
merged_tinyllama_logger/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
model_output/incremental_1_logs/checkpoint-575/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:db60df842ad12cea70ac8238ba2be455622ac32e26d4cb842a044126eddafb9d
+ size 14244
model_output/incremental_1_logs/checkpoint-575/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e164bde1413c3212f149a47133d7d6fc680e60537dbd755c476e37f7a92bf822
+ size 1064
model_output/incremental_1_logs/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
model_output/incremental_1_logs/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:290f55cf786a3f16cb2d84b8109bbdadf1005b363093d17cbd0bdbdb9cb95fdd
+ size 5176
phivenv/Lib/site-packages/huggingface_hub/cli/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (814 Bytes).
 
phivenv/Lib/site-packages/huggingface_hub/cli/__pycache__/_cli_utils.cpython-39.pyc ADDED
Binary file (2.36 kB).
 
phivenv/Lib/site-packages/huggingface_hub/cli/__pycache__/upload.cpython-39.pyc ADDED
Binary file (8.59 kB).
 
phivenv/Lib/site-packages/huggingface_hub/cli/__pycache__/upload_large_folder.cpython-39.pyc ADDED
Binary file (4.76 kB).
 
phivenv/Lib/site-packages/huggingface_hub/commands/__init__.py ADDED
@@ -0,0 +1,27 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from abc import ABC, abstractmethod
+ from argparse import _SubParsersAction
+
+
+ class BaseHuggingfaceCLICommand(ABC):
+     @staticmethod
+     @abstractmethod
+     def register_subcommand(parser: _SubParsersAction):
+         raise NotImplementedError()
+
+     @abstractmethod
+     def run(self):
+         raise NotImplementedError()
phivenv/Lib/site-packages/huggingface_hub/commands/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (819 Bytes).
 
phivenv/Lib/site-packages/huggingface_hub/commands/__pycache__/_cli_utils.cpython-39.pyc ADDED
Binary file (2.68 kB).
 
phivenv/Lib/site-packages/huggingface_hub/commands/__pycache__/delete_cache.cpython-39.pyc ADDED
Binary file (14.8 kB).
 
phivenv/Lib/site-packages/huggingface_hub/commands/__pycache__/download.cpython-39.pyc ADDED
Binary file (5.75 kB).
 
phivenv/Lib/site-packages/huggingface_hub/commands/__pycache__/env.cpython-39.pyc ADDED
Binary file (1.31 kB).
 
phivenv/Lib/site-packages/huggingface_hub/commands/__pycache__/huggingface_cli.cpython-39.pyc ADDED
Binary file (1.87 kB).
 
phivenv/Lib/site-packages/huggingface_hub/commands/__pycache__/lfs.cpython-39.pyc ADDED
Binary file (5.77 kB).
 
phivenv/Lib/site-packages/huggingface_hub/commands/__pycache__/repo.cpython-39.pyc ADDED
Binary file (4.65 kB).
 
phivenv/Lib/site-packages/huggingface_hub/commands/__pycache__/repo_files.cpython-39.pyc ADDED
Binary file (4.14 kB).
 
phivenv/Lib/site-packages/huggingface_hub/commands/__pycache__/scan_cache.cpython-39.pyc ADDED
Binary file (7.81 kB).
 
phivenv/Lib/site-packages/huggingface_hub/commands/__pycache__/tag.cpython-39.pyc ADDED
Binary file (5.94 kB).
 
phivenv/Lib/site-packages/huggingface_hub/commands/__pycache__/upload.cpython-39.pyc ADDED
Binary file (8.82 kB).
 
phivenv/Lib/site-packages/huggingface_hub/commands/__pycache__/upload_large_folder.cpython-39.pyc ADDED
Binary file (4.88 kB).
 
phivenv/Lib/site-packages/huggingface_hub/commands/__pycache__/user.cpython-39.pyc ADDED
Binary file (7.66 kB).
 
phivenv/Lib/site-packages/huggingface_hub/commands/__pycache__/version.cpython-39.pyc ADDED
Binary file (1.37 kB).
 
phivenv/Lib/site-packages/huggingface_hub/commands/_cli_utils.py ADDED
@@ -0,0 +1,74 @@
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Contains a utility for good-looking prints."""
+
+ import os
+ from typing import List, Union
+
+
+ class ANSI:
+     """
+     Helper for en.wikipedia.org/wiki/ANSI_escape_code
+     """
+
+     _bold = "\u001b[1m"
+     _gray = "\u001b[90m"
+     _red = "\u001b[31m"
+     _reset = "\u001b[0m"
+     _yellow = "\u001b[33m"
+
+     @classmethod
+     def bold(cls, s: str) -> str:
+         return cls._format(s, cls._bold)
+
+     @classmethod
+     def gray(cls, s: str) -> str:
+         return cls._format(s, cls._gray)
+
+     @classmethod
+     def red(cls, s: str) -> str:
+         return cls._format(s, cls._bold + cls._red)
+
+     @classmethod
+     def yellow(cls, s: str) -> str:
+         return cls._format(s, cls._yellow)
+
+     @classmethod
+     def _format(cls, s: str, code: str) -> str:
+         if os.environ.get("NO_COLOR"):
+             # See https://no-color.org/
+             return s
+         return f"{code}{s}{cls._reset}"
+
+
+ def tabulate(rows: List[List[Union[str, int]]], headers: List[str]) -> str:
+     """
+     Inspired by:
+
+     - stackoverflow.com/a/8356620/593036
+     - stackoverflow.com/questions/9535954/printing-lists-as-tabular-data
+     """
+     col_widths = [max(len(str(x)) for x in col) for col in zip(*rows, headers)]
+     row_format = ("{{:{}}} " * len(headers)).format(*col_widths)
+     lines = []
+     lines.append(row_format.format(*headers))
+     lines.append(row_format.format(*["-" * w for w in col_widths]))
+     for row in rows:
+         lines.append(row_format.format(*row))
+     return "\n".join(lines)
+
+
+ def show_deprecation_warning(old_command: str, new_command: str):
+     """Show a yellow warning about deprecated CLI command."""
+     print(ANSI.yellow(f"⚠️ Warning: '{old_command}' is deprecated. Use '{new_command}' instead."))
phivenv/Lib/site-packages/huggingface_hub/commands/delete_cache.py ADDED
@@ -0,0 +1,476 @@
+ # coding=utf-8
+ # Copyright 2022-present, the HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Contains command to delete some revisions from the HF cache directory.
+
+ Usage:
+     huggingface-cli delete-cache
+     huggingface-cli delete-cache --disable-tui
+     huggingface-cli delete-cache --dir ~/.cache/huggingface/hub
+     huggingface-cli delete-cache --sort=size
+
+ NOTE:
+     This command is based on `InquirerPy` to build the multiselect menu in the terminal.
+     This dependency has to be installed with `pip install huggingface_hub[cli]`. Since
+     we want to avoid as much as possible cross-platform issues, I chose a library that
+     is built on top of `python-prompt-toolkit` which seems to be a reference in terminal
+     GUI (actively maintained on both Unix and Windows, 7.9k stars).
+
+     For the moment, the TUI feature is in beta.
+
+     See:
+     - https://github.com/kazhala/InquirerPy
+     - https://inquirerpy.readthedocs.io/en/latest/
+     - https://github.com/prompt-toolkit/python-prompt-toolkit
+
+     Other solutions could have been:
+     - `simple_term_menu`: would be good as well for our use case but some issues suggest
+       that Windows is less supported.
+       See: https://github.com/IngoMeyer441/simple-term-menu
+     - `PyInquirer`: very similar to `InquirerPy` but older and not maintained anymore.
+       In particular, no support of Python3.10.
+       See: https://github.com/CITGuru/PyInquirer
+     - `pick` (or `pickpack`): easy to use and flexible but built on top of Python's
+       standard library `curses` that is specific to Unix (not implemented on Windows).
+       See https://github.com/wong2/pick and https://github.com/anafvana/pickpack.
+     - `inquirer`: lot of traction (700 stars) but explicitly states "experimental
+       support of Windows". Not built on top of `python-prompt-toolkit`.
+       See https://github.com/magmax/python-inquirer
+
+ TODO: add support for `huggingface-cli delete-cache aaaaaa bbbbbb cccccc (...)` ?
+ TODO: add "--keep-last" arg to delete revisions that are not on `main` ref
+ TODO: add "--filter" arg to filter repositories by name ?
+ TODO: add "--limit" arg to limit to X repos ?
+ TODO: add "-y" arg for immediate deletion ?
+ See discussions in https://github.com/huggingface/huggingface_hub/issues/1025.
+ """
+
+ import os
+ from argparse import Namespace, _SubParsersAction
+ from functools import wraps
+ from tempfile import mkstemp
+ from typing import Any, Callable, Iterable, List, Literal, Optional, Union
+
+ from ..utils import CachedRepoInfo, CachedRevisionInfo, HFCacheInfo, scan_cache_dir
+ from . import BaseHuggingfaceCLICommand
+ from ._cli_utils import ANSI, show_deprecation_warning
+
+
+ try:
+     from InquirerPy import inquirer
+     from InquirerPy.base.control import Choice
+     from InquirerPy.separator import Separator
+
+     _inquirer_py_available = True
+ except ImportError:
+     _inquirer_py_available = False
+
+ SortingOption_T = Literal["alphabetical", "lastUpdated", "lastUsed", "size"]
+
+
+ def require_inquirer_py(fn: Callable) -> Callable:
+     """Decorator to flag methods that require `InquirerPy`."""
+
+     # TODO: refactor this + imports in a unified pattern across codebase
+     @wraps(fn)
+     def _inner(*args, **kwargs):
+         if not _inquirer_py_available:
+             raise ImportError(
+                 "The `delete-cache` command requires extra dependencies to work with"
+                 " the TUI.\nPlease run `pip install huggingface_hub[cli]` to install"
+                 " them.\nOtherwise, disable TUI using the `--disable-tui` flag."
+             )
+
+         return fn(*args, **kwargs)
+
+     return _inner
+
+
+ # Possibility for the user to cancel deletion
+ _CANCEL_DELETION_STR = "CANCEL_DELETION"
+
+
+ class DeleteCacheCommand(BaseHuggingfaceCLICommand):
+     @staticmethod
+     def register_subcommand(parser: _SubParsersAction):
+         delete_cache_parser = parser.add_parser("delete-cache", help="Delete revisions from the cache directory.")
+
+         delete_cache_parser.add_argument(
+             "--dir",
+             type=str,
+             default=None,
+             help="cache directory (optional). Default to the default HuggingFace cache.",
+         )
+
+         delete_cache_parser.add_argument(
+             "--disable-tui",
+             action="store_true",
+             help=(
+                 "Disable Terminal User Interface (TUI) mode. Useful if your"
+                 " platform/terminal doesn't support the multiselect menu."
+             ),
+         )
+
+         delete_cache_parser.add_argument(
+             "--sort",
+             nargs="?",
+             choices=["alphabetical", "lastUpdated", "lastUsed", "size"],
+             help=(
+                 "Sort repositories by the specified criteria. Options: "
+                 "'alphabetical' (A-Z), "
+                 "'lastUpdated' (newest first), "
+                 "'lastUsed' (most recent first), "
+                 "'size' (largest first)."
+             ),
+         )
+
+         delete_cache_parser.set_defaults(func=DeleteCacheCommand)
+
+     def __init__(self, args: Namespace) -> None:
+         self.cache_dir: Optional[str] = args.dir
+         self.disable_tui: bool = args.disable_tui
+         self.sort_by: Optional[SortingOption_T] = args.sort
+
+     def run(self):
+         """Run `delete-cache` command with or without TUI."""
+         show_deprecation_warning("huggingface-cli delete-cache", "hf cache delete")
+
+         # Scan cache directory
+         hf_cache_info = scan_cache_dir(self.cache_dir)
+
+         # Manual review from the user
+         if self.disable_tui:
+             selected_hashes = _manual_review_no_tui(hf_cache_info, preselected=[], sort_by=self.sort_by)
+         else:
+             selected_hashes = _manual_review_tui(hf_cache_info, preselected=[], sort_by=self.sort_by)
+
+         # If deletion is not cancelled
+         if len(selected_hashes) > 0 and _CANCEL_DELETION_STR not in selected_hashes:
+             confirm_message = _get_expectations_str(hf_cache_info, selected_hashes) + " Confirm deletion ?"
+
+             # Confirm deletion
+             if self.disable_tui:
+                 confirmed = _ask_for_confirmation_no_tui(confirm_message)
+             else:
+                 confirmed = _ask_for_confirmation_tui(confirm_message)
+
+             # Deletion is confirmed
+             if confirmed:
+                 strategy = hf_cache_info.delete_revisions(*selected_hashes)
+                 print("Start deletion.")
+                 strategy.execute()
+                 print(
+                     f"Done. Deleted {len(strategy.repos)} repo(s) and"
+                     f" {len(strategy.snapshots)} revision(s) for a total of"
+                     f" {strategy.expected_freed_size_str}."
+                 )
+                 return
+
+         # Deletion is cancelled
+         print("Deletion is cancelled. Do nothing.")
+
+
+ def _get_repo_sorting_key(repo: CachedRepoInfo, sort_by: Optional[SortingOption_T] = None):
+     if sort_by == "alphabetical":
+         return (repo.repo_type, repo.repo_id.lower())  # by type then name
+     elif sort_by == "lastUpdated":
+         return -max(rev.last_modified for rev in repo.revisions)  # newest first
+     elif sort_by == "lastUsed":
+         return -repo.last_accessed  # most recently used first
+     elif sort_by == "size":
+         return -repo.size_on_disk  # largest first
+     else:
+         return (repo.repo_type, repo.repo_id)  # default stable order
+
+
+ @require_inquirer_py
+ def _manual_review_tui(
+     hf_cache_info: HFCacheInfo,
+     preselected: List[str],
+     sort_by: Optional[SortingOption_T] = None,
+ ) -> List[str]:
+     """Ask the user for a manual review of the revisions to delete.
+
+     Displays a multi-select menu in the terminal (TUI).
+     """
+     # Define multiselect list
+     choices = _get_tui_choices_from_scan(
+         repos=hf_cache_info.repos,
+         preselected=preselected,
+         sort_by=sort_by,
+     )
+     checkbox = inquirer.checkbox(
+         message="Select revisions to delete:",
+         choices=choices,  # List of revisions with some pre-selection
+         cycle=False,  # No loop between top and bottom
+         height=100,  # Large list if possible
+         # We use the instruction to display to the user the expected effect of the
+         # deletion.
+         instruction=_get_expectations_str(
+             hf_cache_info,
+             selected_hashes=[c.value for c in choices if isinstance(c, Choice) and c.enabled],
+         ),
+         # We use the long instruction to should keybindings instructions to the user
+         long_instruction="Press <space> to select, <enter> to validate and <ctrl+c> to quit without modification.",
+         # Message that is displayed once the user validates its selection.
+         transformer=lambda result: f"{len(result)} revision(s) selected.",
+     )
+
+     # Add a callback to update the information line when a revision is
+     # selected/unselected
+     def _update_expectations(_) -> None:
+         # Hacky way to dynamically set an instruction message to the checkbox when
+         # a revision hash is selected/unselected.
+         checkbox._instruction = _get_expectations_str(
+             hf_cache_info,
+             selected_hashes=[choice["value"] for choice in checkbox.content_control.choices if choice["enabled"]],
+         )
+
+     checkbox.kb_func_lookup["toggle"].append({"func": _update_expectations})
+
+     # Finally display the form to the user.
+     try:
+         return checkbox.execute()
+     except KeyboardInterrupt:
+         return []  # Quit without deletion
+
+
+ @require_inquirer_py
+ def _ask_for_confirmation_tui(message: str, default: bool = True) -> bool:
+     """Ask for confirmation using Inquirer."""
+     return inquirer.confirm(message, default=default).execute()
+
+
+ def _get_tui_choices_from_scan(
+     repos: Iterable[CachedRepoInfo],
+     preselected: List[str],
+     sort_by: Optional[SortingOption_T] = None,
+ ) -> List:
+     """Build a list of choices from the scanned repos.
+
+     Args:
+         repos (*Iterable[`CachedRepoInfo`]*):
+             List of scanned repos on which we want to delete revisions.
+         preselected (*List[`str`]*):
+             List of revision hashes that will be preselected.
+         sort_by (*Optional[SortingOption_T]*):
+             Sorting direction. Choices: "alphabetical", "lastUpdated", "lastUsed", "size".
+
+     Return:
+         The list of choices to pass to `inquirer.checkbox`.
+     """
+     choices: List[Union[Choice, Separator]] = []
+
+     # First choice is to cancel the deletion
+     choices.append(
+         Choice(
+             _CANCEL_DELETION_STR,
+             name="None of the following (if selected, nothing will be deleted).",
+             enabled=False,
+         )
+     )
+
+     # Sort repos based on specified criteria
+     sorted_repos = sorted(repos, key=lambda repo: _get_repo_sorting_key(repo, sort_by))
+
+     for repo in sorted_repos:
+         # Repo as separator
+         choices.append(
+             Separator(
+                 f"\n{repo.repo_type.capitalize()} {repo.repo_id} ({repo.size_on_disk_str},"
+                 f" used {repo.last_accessed_str})"
+             )
+         )
+         for revision in sorted(repo.revisions, key=_revision_sorting_order):
+             # Revision as choice
+             choices.append(
+                 Choice(
+                     revision.commit_hash,
+                     name=(
+                         f"{revision.commit_hash[:8]}:"
+                         f" {', '.join(sorted(revision.refs)) or '(detached)'} #"
+                         f" modified {revision.last_modified_str}"
+                     ),
+                     enabled=revision.commit_hash in preselected,
+                 )
+             )
+
+     # Return choices
+     return choices
+
+
+ def _manual_review_no_tui(
+     hf_cache_info: HFCacheInfo,
+     preselected: List[str],
+     sort_by: Optional[SortingOption_T] = None,
+ ) -> List[str]:
+     """Ask the user for a manual review of the revisions to delete.
+
+     Used when TUI is disabled. Manual review happens in a separate tmp file that the
+     user can manually edit.
+     """
+     # 1. Generate temporary file with delete commands.
+     fd, tmp_path = mkstemp(suffix=".txt")  # suffix to make it easier to find by editors
+     os.close(fd)
+
+     lines = []
+
+     sorted_repos = sorted(hf_cache_info.repos, key=lambda repo: _get_repo_sorting_key(repo, sort_by))
+
+     for repo in sorted_repos:
+         lines.append(
+             f"\n# {repo.repo_type.capitalize()} {repo.repo_id} ({repo.size_on_disk_str},"
+             f" used {repo.last_accessed_str})"
+         )
+         for revision in sorted(repo.revisions, key=_revision_sorting_order):
+             lines.append(
+                 # Deselect by prepending a '#'
+                 f"{'' if revision.commit_hash in preselected else '#'} "
+                 f" {revision.commit_hash} # Refs:"
+                 # Print `refs` as comment on same line
+                 f" {', '.join(sorted(revision.refs)) or '(detached)'} # modified"
+                 # Print `last_modified` as comment on same line
+                 f" {revision.last_modified_str}"
+             )
+
+     with open(tmp_path, "w") as f:
+         f.write(_MANUAL_REVIEW_NO_TUI_INSTRUCTIONS)
+         f.write("\n".join(lines))
+
+     # 2. Prompt instructions to user.
+     instructions = f"""
+     TUI is disabled. In order to select which revisions you want to delete, please edit
+     the following file using the text editor of your choice. Instructions for manual
+     editing are located at the beginning of the file. Edit the file, save it and confirm
+     to continue.
+     File to edit: {ANSI.bold(tmp_path)}
+     """
+     print("\n".join(line.strip() for line in instructions.strip().split("\n")))
+
+     # 3. Wait for user confirmation.
+     while True:
+         selected_hashes = _read_manual_review_tmp_file(tmp_path)
+         if _ask_for_confirmation_no_tui(
+             _get_expectations_str(hf_cache_info, selected_hashes) + " Continue ?",
+             default=False,
+         ):
+             break
+
+     # 4. Return selected_hashes sorted to maintain stable order
+     os.remove(tmp_path)
+     return sorted(selected_hashes)  # Sort to maintain stable order
+
+
+ def _ask_for_confirmation_no_tui(message: str, default: bool = True) -> bool:
+     """Ask for confirmation using pure-python."""
+     YES = ("y", "yes", "1")
+     NO = ("n", "no", "0")
+     DEFAULT = ""
+     ALL = YES + NO + (DEFAULT,)
+     full_message = message + (" (Y/n) " if default else " (y/N) ")
+     while True:
+         answer = input(full_message).lower()
+         if answer == DEFAULT:
+             return default
+         if answer in YES:
+             return True
+         if answer in NO:
+             return False
+         print(f"Invalid input. Must be one of {ALL}")
+
+
+ def _get_expectations_str(hf_cache_info: HFCacheInfo, selected_hashes: List[str]) -> str:
+     """Format a string to display to the user how much space would be saved.
+
+     Example:
+     ```
+     >>> _get_expectations_str(hf_cache_info, selected_hashes)
+     '7 revisions selected counting for 4.3G.'
+     ```
+     """
+     if _CANCEL_DELETION_STR in selected_hashes:
+         return "Nothing will be deleted."
+     strategy = hf_cache_info.delete_revisions(*selected_hashes)
+     return f"{len(selected_hashes)} revisions selected counting for {strategy.expected_freed_size_str}."
+
+
+ def _read_manual_review_tmp_file(tmp_path: str) -> List[str]:
+     """Read the manually reviewed instruction file and return a list of revision hash.
+
+     Example:
+     ```txt
+     # This is the tmp file content
+     ###
+
+     # Commented out line
+     123456789 # revision hash
+
+     # Something else
+     # a_newer_hash # 2 days ago
+     an_older_hash # 3 days ago
+     ```
+
+     ```py
+     >>> _read_manual_review_tmp_file(tmp_path)
+     ['123456789', 'an_older_hash']
+     ```
+     """
+     with open(tmp_path) as f:
+         content = f.read()
+
+     # Split lines
+     lines = [line.strip() for line in content.split("\n")]
+
+     # Filter commented lines
+     selected_lines = [line for line in lines if not line.startswith("#")]
+
+     # Select only before comment
+     selected_hashes = [line.split("#")[0].strip() for line in selected_lines]
+
+     # Return revision hashes
+     return [hash for hash in selected_hashes if len(hash) > 0]
+
+
+ _MANUAL_REVIEW_NO_TUI_INSTRUCTIONS = f"""
+ # INSTRUCTIONS
+ # ------------
+ # This is a temporary file created by running `huggingface-cli delete-cache` with the
+ # `--disable-tui` option. It contains a set of revisions that can be deleted from your
+ # local cache directory.
+ #
+ # Please manually review the revisions you want to delete:
+ # - Revision hashes can be commented out with '#'.
+ # - Only non-commented revisions in this file will be deleted.
+ # - Revision hashes that are removed from this file are ignored as well.
+ # - If `{_CANCEL_DELETION_STR}` line is uncommented, the all cache deletion is cancelled and
+ #   no changes will be applied.
+ #
+ # Once you've manually reviewed this file, please confirm deletion in the terminal. This
+ # file will be automatically removed once done.
+ # ------------
+
+ # KILL SWITCH
+ # ------------
+ # Un-comment following line to completely cancel the deletion process
+ # {_CANCEL_DELETION_STR}
+ # ------------
+
+ # REVISIONS
+ # ------------
+ """.strip()
+
+
+ def _revision_sorting_order(revision: CachedRevisionInfo) -> Any:
+     # Sort by last modified (oldest first)
+     return revision.last_modified
phivenv/Lib/site-packages/huggingface_hub/commands/download.py ADDED
@@ -0,0 +1,204 @@
+ # coding=utf-8
+ # Copyright 2023-present, the HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Contains command to download files from the Hub with the CLI.
+
+ Usage:
+     huggingface-cli download --help
+
+     # Download file
+     huggingface-cli download gpt2 config.json
+
+     # Download entire repo
+     huggingface-cli download fffiloni/zeroscope --repo-type=space --revision=refs/pr/78
+
+     # Download repo with filters
+     huggingface-cli download gpt2 --include="*.safetensors"
+
+     # Download with token
+     huggingface-cli download Wauplin/private-model --token=hf_***
+
+     # Download quietly (no progress bar, no warnings, only the returned path)
+     huggingface-cli download gpt2 config.json --quiet
+
+     # Download to local dir
+     huggingface-cli download gpt2 --local-dir=./models/gpt2
+ """
+
+ import warnings
+ from argparse import Namespace, _SubParsersAction
+ from typing import List, Optional
+
+ from huggingface_hub import logging
+ from huggingface_hub._snapshot_download import snapshot_download
+ from huggingface_hub.commands import BaseHuggingfaceCLICommand
+ from huggingface_hub.file_download import hf_hub_download
+ from huggingface_hub.utils import disable_progress_bars, enable_progress_bars
+
+ from ._cli_utils import show_deprecation_warning
+
+
+ logger = logging.get_logger(__name__)
+
+
+ class DownloadCommand(BaseHuggingfaceCLICommand):
+     @staticmethod
+     def register_subcommand(parser: _SubParsersAction):
+         download_parser = parser.add_parser("download", help="Download files from the Hub")
+         download_parser.add_argument(
+             "repo_id", type=str, help="ID of the repo to download from (e.g. `username/repo-name`)."
+         )
+         download_parser.add_argument(
+             "filenames", type=str, nargs="*", help="Files to download (e.g. `config.json`, `data/metadata.jsonl`)."
+         )
+         download_parser.add_argument(
+             "--repo-type",
+             choices=["model", "dataset", "space"],
+             default="model",
+             help="Type of repo to download from (defaults to 'model').",
+         )
+         download_parser.add_argument(
+             "--revision",
+             type=str,
+             help="An optional Git revision id which can be a branch name, a tag, or a commit hash.",
+         )
+         download_parser.add_argument(
+             "--include", nargs="*", type=str, help="Glob patterns to match files to download."
+         )
+         download_parser.add_argument(
+             "--exclude", nargs="*", type=str, help="Glob patterns to exclude from files to download."
+         )
+         download_parser.add_argument(
+             "--cache-dir", type=str, help="Path to the directory where to save the downloaded files."
+         )
+         download_parser.add_argument(
+             "--local-dir",
+             type=str,
+             help=(
+                 "If set, the downloaded file will be placed under this directory. Check out"
+                 " https://huggingface.co/docs/huggingface_hub/guides/download#download-files-to-local-folder for more"
+                 " details."
+             ),
+         )
+         download_parser.add_argument(
+             "--local-dir-use-symlinks",
+             choices=["auto", "True", "False"],
+             help=("Deprecated and ignored. Downloading to a local directory does not use symlinks anymore."),
+         )
+         download_parser.add_argument(
+             "--force-download",
+             action="store_true",
+             help="If True, the files will be downloaded even if they are already cached.",
+         )
+         download_parser.add_argument(
+             "--resume-download",
+             action="store_true",
+             help="Deprecated and ignored. Downloading a file to local dir always attempts to resume previously interrupted downloads (unless hf-transfer is enabled).",
+         )
+         download_parser.add_argument(
+             "--token", type=str, help="A User Access Token generated from https://huggingface.co/settings/tokens"
+         )
+         download_parser.add_argument(
+             "--quiet",
+             action="store_true",
+             help="If True, progress bars are disabled and only the path to the download files is printed.",
+         )
+         download_parser.add_argument(
+             "--max-workers",
+             type=int,
+             default=8,
+             help="Maximum number of workers to use for downloading files. Default is 8.",
+         )
+         download_parser.set_defaults(func=DownloadCommand)
+
+     def __init__(self, args: Namespace) -> None:
+         self.token = args.token
+         self.repo_id: str = args.repo_id
+         self.filenames: List[str] = args.filenames
+         self.repo_type: str = args.repo_type
+         self.revision: Optional[str] = args.revision
+         self.include: Optional[List[str]] = args.include
+         self.exclude: Optional[List[str]] = args.exclude
+         self.cache_dir: Optional[str] = args.cache_dir
+         self.local_dir: Optional[str] = args.local_dir
+         self.force_download: bool = args.force_download
+         self.resume_download: Optional[bool] = args.resume_download or None
+         self.quiet: bool = args.quiet
+         self.max_workers: int = args.max_workers
+
+         if args.local_dir_use_symlinks is not None:
+             warnings.warn(
+                 "Ignoring --local-dir-use-symlinks. Downloading to a local directory does not use symlinks anymore.",
+                 FutureWarning,
+             )
+
+     def run(self) -> None:
+         show_deprecation_warning("huggingface-cli download", "hf download")
+
+         if self.quiet:
+             disable_progress_bars()
+             with warnings.catch_warnings():
+                 warnings.simplefilter("ignore")
+                 print(self._download())  # Print path to downloaded files
+             enable_progress_bars()
+         else:
+             logging.set_verbosity_info()
+             print(self._download())  # Print path to downloaded files
+             logging.set_verbosity_warning()
+
+     def _download(self) -> str:
+         # Warn user if patterns are ignored
+         if len(self.filenames) > 0:
+             if self.include is not None and len(self.include) > 0:
+                 warnings.warn("Ignoring `--include` since filenames have being explicitly set.")
+             if self.exclude is not None and len(self.exclude) > 0:
+                 warnings.warn("Ignoring `--exclude` since filenames have being explicitly set.")
+
+         # Single file to download: use `hf_hub_download`
+         if len(self.filenames) == 1:
+             return hf_hub_download(
+                 repo_id=self.repo_id,
+                 repo_type=self.repo_type,
+                 revision=self.revision,
+                 filename=self.filenames[0],
+                 cache_dir=self.cache_dir,
+                 resume_download=self.resume_download,
+                 force_download=self.force_download,
+                 token=self.token,
+                 local_dir=self.local_dir,
+                 library_name="huggingface-cli",
+             )
+
+         # Otherwise: use `snapshot_download` to ensure all files comes from same revision
+         elif len(self.filenames) == 0:
+             allow_patterns = self.include
+             ignore_patterns = self.exclude
+         else:
+             allow_patterns = self.filenames
+             ignore_patterns = None
+
+         return snapshot_download(
+             repo_id=self.repo_id,
+             repo_type=self.repo_type,
+             revision=self.revision,
+             allow_patterns=allow_patterns,
+             ignore_patterns=ignore_patterns,
+             resume_download=self.resume_download,
+             force_download=self.force_download,
+             cache_dir=self.cache_dir,
+             token=self.token,
+             local_dir=self.local_dir,
+             library_name="huggingface-cli",
+             max_workers=self.max_workers,
+         )
phivenv/Lib/site-packages/huggingface_hub/commands/env.py ADDED
@@ -0,0 +1,39 @@
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Contains command to print information about the environment.
+
+ Usage:
+     huggingface-cli env
+ """
+
+ from argparse import _SubParsersAction
+
+ from ..utils import dump_environment_info
+ from . import BaseHuggingfaceCLICommand
+ from ._cli_utils import show_deprecation_warning
+
+
+ class EnvironmentCommand(BaseHuggingfaceCLICommand):
+     def __init__(self, args):
+         self.args = args
+
+     @staticmethod
+     def register_subcommand(parser: _SubParsersAction):
+         env_parser = parser.add_parser("env", help="Print information about the environment.")
+         env_parser.set_defaults(func=EnvironmentCommand)
+
+     def run(self) -> None:
+         show_deprecation_warning("huggingface-cli env", "hf env")
+
+         dump_environment_info()
phivenv/Lib/site-packages/huggingface_hub/commands/huggingface_cli.py ADDED
@@ -0,0 +1,65 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from argparse import ArgumentParser
+
+ from huggingface_hub.commands._cli_utils import show_deprecation_warning
+ from huggingface_hub.commands.delete_cache import DeleteCacheCommand
+ from huggingface_hub.commands.download import DownloadCommand
+ from huggingface_hub.commands.env import EnvironmentCommand
+ from huggingface_hub.commands.lfs import LfsCommands
+ from huggingface_hub.commands.repo import RepoCommands
+ from huggingface_hub.commands.repo_files import RepoFilesCommand
+ from huggingface_hub.commands.scan_cache import ScanCacheCommand
+ from huggingface_hub.commands.tag import TagCommands
+ from huggingface_hub.commands.upload import UploadCommand
+ from huggingface_hub.commands.upload_large_folder import UploadLargeFolderCommand
+ from huggingface_hub.commands.user import UserCommands
+ from huggingface_hub.commands.version import VersionCommand
+
+
+ def main():
+     parser = ArgumentParser("huggingface-cli", usage="huggingface-cli <command> [<args>]")
+     commands_parser = parser.add_subparsers(help="huggingface-cli command helpers")
+
+     # Register commands
+     DownloadCommand.register_subcommand(commands_parser)
+     UploadCommand.register_subcommand(commands_parser)
+     RepoFilesCommand.register_subcommand(commands_parser)
+     EnvironmentCommand.register_subcommand(commands_parser)
+     UserCommands.register_subcommand(commands_parser)
+     RepoCommands.register_subcommand(commands_parser)
+     LfsCommands.register_subcommand(commands_parser)
+     ScanCacheCommand.register_subcommand(commands_parser)
+     DeleteCacheCommand.register_subcommand(commands_parser)
+     TagCommands.register_subcommand(commands_parser)
+     VersionCommand.register_subcommand(commands_parser)
+
+     # Experimental
+     UploadLargeFolderCommand.register_subcommand(commands_parser)
+
+     # Let's go
+     args = parser.parse_args()
+     if not hasattr(args, "func"):
+         show_deprecation_warning("huggingface-cli", "hf")
+         parser.print_help()
+         exit(1)
+
+     # Run
+     service = args.func(args)
+     service.run()
+
+
+ if __name__ == "__main__":
+     main()
phivenv/Lib/site-packages/huggingface_hub/commands/lfs.py ADDED
@@ -0,0 +1,200 @@
+ """
+ Implementation of a custom transfer agent for the transfer type "multipart" for
+ git-lfs.
+
+ Inspired by:
+ github.com/cbartz/git-lfs-swift-transfer-agent/blob/master/git_lfs_swift_transfer.py
+
+ Spec is: github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md
+
+
+ To launch debugger while developing:
+
+ ``` [lfs "customtransfer.multipart"]
+ path = /path/to/huggingface_hub/.env/bin/python args = -m debugpy --listen 5678
+ --wait-for-client
+ /path/to/huggingface_hub/src/huggingface_hub/commands/huggingface_cli.py
+ lfs-multipart-upload ```"""
+
+ import json
+ import os
+ import subprocess
+ import sys
+ from argparse import _SubParsersAction
+ from typing import Dict, List, Optional
+
+ from huggingface_hub.commands import BaseHuggingfaceCLICommand
+ from huggingface_hub.lfs import LFS_MULTIPART_UPLOAD_COMMAND
+
+ from ..utils import get_session, hf_raise_for_status, logging
+ from ..utils._lfs import SliceFileObj
+
+
+ logger = logging.get_logger(__name__)
+
+
+ class LfsCommands(BaseHuggingfaceCLICommand):
+     """
+     Implementation of a custom transfer agent for the transfer type "multipart"
+     for git-lfs. This lets users upload large files >5GB 🔥. Spec for LFS custom
+     transfer agent is:
+     https://github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md
+
+     This introduces two commands to the CLI:
+
+     1. $ huggingface-cli lfs-enable-largefiles
+
+     This should be executed once for each model repo that contains a model file
+     >5GB. It's documented in the error message you get if you just try to git
+     push a 5GB file without having enabled it before.
+
+     2. $ huggingface-cli lfs-multipart-upload
+
+     This command is called by lfs directly and is not meant to be called by the
+     user.
+     """
+
+     @staticmethod
+     def register_subcommand(parser: _SubParsersAction):
+         enable_parser = parser.add_parser(
+             "lfs-enable-largefiles", help="Configure your repository to enable upload of files > 5GB."
+         )
+         enable_parser.add_argument("path", type=str, help="Local path to repository you want to configure.")
+         enable_parser.set_defaults(func=lambda args: LfsEnableCommand(args))
+
+         # Command will get called by git-lfs, do not call it directly.
+         upload_parser = parser.add_parser(LFS_MULTIPART_UPLOAD_COMMAND, add_help=False)
+         upload_parser.set_defaults(func=lambda args: LfsUploadCommand(args))
+
+
+ class LfsEnableCommand:
+     def __init__(self, args):
+         self.args = args
+
+     def run(self):
+         local_path = os.path.abspath(self.args.path)
+         if not os.path.isdir(local_path):
+             print("This does not look like a valid git repo.")
+             exit(1)
+         subprocess.run(
+             "git config lfs.customtransfer.multipart.path huggingface-cli".split(),
+             check=True,
+             cwd=local_path,
+         )
+         subprocess.run(
+             f"git config lfs.customtransfer.multipart.args {LFS_MULTIPART_UPLOAD_COMMAND}".split(),
+             check=True,
+             cwd=local_path,
+         )
+         print("Local repo set up for largefiles")
+
+
+ def write_msg(msg: Dict):
+     """Write out the message in Line delimited JSON."""
+     msg_str = json.dumps(msg) + "\n"
+     sys.stdout.write(msg_str)
+     sys.stdout.flush()
+
+
+ def read_msg() -> Optional[Dict]:
+     """Read Line delimited JSON from stdin."""
+     msg = json.loads(sys.stdin.readline().strip())
+
+     if "terminate" in (msg.get("type"), msg.get("event")):
+         # terminate message received
+         return None
+
+     if msg.get("event") not in ("download", "upload"):
+         logger.critical("Received unexpected message")
+         sys.exit(1)
+
+     return msg
+
+
+ class LfsUploadCommand:
+     def __init__(self, args) -> None:
+         self.args = args
+
+     def run(self) -> None:
+         # Immediately after invoking a custom transfer process, git-lfs
+         # sends initiation data to the process over stdin.
+         # This tells the process useful information about the configuration.
+         init_msg = json.loads(sys.stdin.readline().strip())
+         if not (init_msg.get("event") == "init" and init_msg.get("operation") == "upload"):
+             write_msg({"error": {"code": 32, "message": "Wrong lfs init operation"}})
+             sys.exit(1)
+
+         # The transfer process should use the information it needs from the
+         # initiation structure, and also perform any one-off setup tasks it
+         # needs to do. It should then respond on stdout with a simple empty
+         # confirmation structure, as follows:
+         write_msg({})
+
+         # After the initiation exchange, git-lfs will send any number of
+         # transfer requests to the stdin of the transfer process, in a serial sequence.
+         while True:
+             msg = read_msg()
+             if msg is None:
+                 # When all transfers have been processed, git-lfs will send
+                 # a terminate event to the stdin of the transfer process.
+                 # On receiving this message the transfer process should
+                 # clean up and terminate. No response is expected.
+                 sys.exit(0)
+
+             oid = msg["oid"]
+             filepath = msg["path"]
+             completion_url = msg["action"]["href"]
+             header = msg["action"]["header"]
+             chunk_size = int(header.pop("chunk_size"))
+             presigned_urls: List[str] = list(header.values())
+
+             # Send a "started" progress event to allow other workers to start.
+             # Otherwise they're delayed until first "progress" event is reported,
+             # i.e. after the first 5GB by default (!)
+             write_msg(
+                 {
+                     "event": "progress",
+                     "oid": oid,
+                     "bytesSoFar": 1,
+                     "bytesSinceLast": 0,
+                 }
+             )
+
+             parts = []
+             with open(filepath, "rb") as file:
+                 for i, presigned_url in enumerate(presigned_urls):
+                     with SliceFileObj(
+                         file,
+                         seek_from=i * chunk_size,
+                         read_limit=chunk_size,
+                     ) as data:
+                         r = get_session().put(presigned_url, data=data)
+                         hf_raise_for_status(r)
+                         parts.append(
+                             {
+                                 "etag": r.headers.get("etag"),
+                                 "partNumber": i + 1,
+                             }
+                         )
+                         # In order to support progress reporting while data is uploading / downloading,
+                         # the transfer process should post messages to stdout
+                         write_msg(
+                             {
+                                 "event": "progress",
+                                 "oid": oid,
+                                 "bytesSoFar": (i + 1) * chunk_size,
+                                 "bytesSinceLast": chunk_size,
+                             }
+                         )
+                         # Not precise but that's ok.
+
+             r = get_session().post(
+                 completion_url,
+                 json={
+                     "oid": oid,
+                     "parts": parts,
+                 },
+             )
+             hf_raise_for_status(r)
+
+             write_msg({"event": "complete", "oid": oid})
phivenv/Lib/site-packages/huggingface_hub/commands/repo.py ADDED
@@ -0,0 +1,151 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2025 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """Contains commands to interact with repositories on the Hugging Face Hub.
15
+
16
+ Usage:
17
+ # create a new dataset repo on the Hub
18
+ huggingface-cli repo create my-cool-dataset --repo-type=dataset
19
+
20
+ # create a private model repo on the Hub
21
+ huggingface-cli repo create my-cool-model --private
22
+ """
23
+
24
+ import argparse
25
+ from argparse import _SubParsersAction
26
+ from typing import Optional
27
+
28
+ from huggingface_hub.commands import BaseHuggingfaceCLICommand
29
+ from huggingface_hub.commands._cli_utils import ANSI
30
+ from huggingface_hub.constants import SPACES_SDK_TYPES
31
+ from huggingface_hub.hf_api import HfApi
32
+ from huggingface_hub.utils import logging
33
+
34
+ from ._cli_utils import show_deprecation_warning
35
+
36
+
37
+ logger = logging.get_logger(__name__)
38
+
39
+
40
+ class RepoCommands(BaseHuggingfaceCLICommand):
41
+ @staticmethod
42
+ def register_subcommand(parser: _SubParsersAction):
43
+ repo_parser = parser.add_parser("repo", help="{create} Commands to interact with your huggingface.co repos.")
44
+ repo_subparsers = repo_parser.add_subparsers(help="huggingface.co repos related commands")
45
+ repo_create_parser = repo_subparsers.add_parser("create", help="Create a new repo on huggingface.co")
46
+ repo_create_parser.add_argument(
47
+ "repo_id",
48
+ type=str,
49
+ help="The ID of the repo to create to (e.g. `username/repo-name`). The username is optional and will be set to your username if not provided.",
50
+ )
51
+ repo_create_parser.add_argument(
52
+ "--repo-type",
53
+ type=str,
54
+ help='Optional: set to "dataset" or "space" if creating a dataset or space, default is model.',
55
+ )
56
+ repo_create_parser.add_argument(
57
+ "--space_sdk",
58
+ type=str,
59
+ help='Optional: Hugging Face Spaces SDK type. Required when --type is set to "space".',
60
+ choices=SPACES_SDK_TYPES,
61
+ )
62
+ repo_create_parser.add_argument(
63
+ "--private",
64
+ action="store_true",
65
+ help="Whether to create a private repository. Defaults to public unless the organization's default is private.",
66
+ )
67
+ repo_create_parser.add_argument(
68
+ "--token",
69
+ type=str,
70
+ help="Hugging Face token. Will default to the locally saved token if not provided.",
71
+ )
72
+ repo_create_parser.add_argument(
73
+ "--exist-ok",
74
+ action="store_true",
75
+ help="Do not raise an error if repo already exists.",
76
+ )
77
+ repo_create_parser.add_argument(
78
+ "--resource-group-id",
79
+ type=str,
80
+ help="Resource group in which to create the repo. Resource groups is only available for Enterprise Hub organizations.",
81
+ )
82
+ repo_create_parser.add_argument(
83
+ "--type",
84
+ type=str,
85
+ help="[Deprecated]: use --repo-type instead.",
86
+ )
87
+ repo_create_parser.add_argument(
88
+ "-y",
89
+ "--yes",
90
+ action="store_true",
91
+ help="[Deprecated] no effect.",
92
+ )
93
+ repo_create_parser.add_argument(
94
+ "--organization", type=str, help="[Deprecated] Pass the organization namespace directly in the repo_id."
95
+ )
96
+ repo_create_parser.set_defaults(func=lambda args: RepoCreateCommand(args))
97
+
98
+
99
+ class RepoCreateCommand:
100
+ def __init__(self, args: argparse.Namespace):
101
+ self.repo_id: str = args.repo_id
102
+ self.repo_type: Optional[str] = args.repo_type or args.type
103
+ self.space_sdk: Optional[str] = args.space_sdk
104
+ self.organization: Optional[str] = args.organization
105
+ self.yes: bool = args.yes
106
+ self.private: bool = args.private
107
+ self.token: Optional[str] = args.token
108
+ self.exist_ok: bool = args.exist_ok
109
+ self.resource_group_id: Optional[str] = args.resource_group_id
110
+
111
+ if args.type is not None:
112
+ print(
113
+ ANSI.yellow(
114
+ "The --type argument is deprecated and will be removed in a future version. Use --repo-type instead."
115
+ )
116
+ )
117
+ if self.organization is not None:
118
+ print(
119
+ ANSI.yellow(
120
+ "The --organization argument is deprecated and will be removed in a future version. Pass the organization namespace directly in the repo_id."
121
+ )
122
+ )
123
+ if self.yes:
124
+ print(
125
+ ANSI.yellow(
126
+ "The --yes argument is deprecated and will be removed in a future version. It does not have any effect."
127
+ )
128
+ )
129
+
130
+ self._api = HfApi()
131
+
132
+ def run(self):
133
+ show_deprecation_warning("huggingface-cli repo", "hf repo")
134
+
135
+ if self.organization is not None:
136
+ if "/" in self.repo_id:
137
+ print(ANSI.red("You cannot pass both --organization and a repo_id with a namespace."))
138
+ exit(1)
139
+ self.repo_id = f"{self.organization}/{self.repo_id}"
140
+
141
+ repo_url = self._api.create_repo(
142
+ repo_id=self.repo_id,
143
+ repo_type=self.repo_type,
144
+ private=self.private,
145
+ token=self.token,
146
+ exist_ok=self.exist_ok,
147
+ resource_group_id=self.resource_group_id,
148
+ space_sdk=self.space_sdk,
149
+ )
150
+ print(f"Successfully created {ANSI.bold(repo_url.repo_id)} on the Hub.")
151
+ print(f"Your repo is now available at {ANSI.bold(repo_url)}")
phivenv/Lib/site-packages/huggingface_hub/commands/repo_files.py ADDED
@@ -0,0 +1,132 @@
1
+ # coding=utf-8
2
+ # Copyright 2023-present, the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Contains command to update or delete files in a repository using the CLI.
16
+
17
+ Usage:
18
+ # delete all
19
+ huggingface-cli repo-files <repo_id> delete "*"
20
+
21
+ # delete single file
22
+ huggingface-cli repo-files <repo_id> delete file.txt
23
+
24
+ # delete single folder
25
+ huggingface-cli repo-files <repo_id> delete folder/
26
+
27
+ # delete multiple
28
+ huggingface-cli repo-files <repo_id> delete file.txt folder/ file2.txt
29
+
30
+ # delete multiple patterns
31
+ huggingface-cli repo-files <repo_id> delete file.txt "*.json" "folder/*.parquet"
32
+
33
+ # delete from different revision / repo-type
34
+ huggingface-cli repo-files <repo_id> delete file.txt --revision=refs/pr/1 --repo-type=dataset
35
+ """
36
+
37
+ from argparse import _SubParsersAction
38
+ from typing import List, Optional
39
+
40
+ from huggingface_hub import logging
41
+ from huggingface_hub.commands import BaseHuggingfaceCLICommand
42
+ from huggingface_hub.hf_api import HfApi
43
+
44
+ from ._cli_utils import show_deprecation_warning
45
+
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+
50
+ class DeleteFilesSubCommand:
51
+ def __init__(self, args) -> None:
52
+ self.args = args
53
+ self.repo_id: str = args.repo_id
54
+ self.repo_type: Optional[str] = args.repo_type
55
+ self.revision: Optional[str] = args.revision
56
+ self.api: HfApi = HfApi(token=args.token, library_name="huggingface-cli")
57
+ self.patterns: List[str] = args.patterns
58
+ self.commit_message: Optional[str] = args.commit_message
59
+ self.commit_description: Optional[str] = args.commit_description
60
+ self.create_pr: bool = args.create_pr
61
+ self.token: Optional[str] = args.token
62
+
63
+ def run(self) -> None:
64
+ show_deprecation_warning("huggingface-cli repo-files", "hf repo-files")
65
+
66
+ logging.set_verbosity_info()
67
+ url = self.api.delete_files(
68
+ delete_patterns=self.patterns,
69
+ repo_id=self.repo_id,
70
+ repo_type=self.repo_type,
71
+ revision=self.revision,
72
+ commit_message=self.commit_message,
73
+ commit_description=self.commit_description,
74
+ create_pr=self.create_pr,
75
+ )
76
+ print(f"Files correctly deleted from repo. Commit: {url}.")
77
+ logging.set_verbosity_warning()
78
+
79
+
80
+ class RepoFilesCommand(BaseHuggingfaceCLICommand):
81
+ @staticmethod
82
+ def register_subcommand(parser: _SubParsersAction):
83
+ repo_files_parser = parser.add_parser("repo-files", help="Manage files in a repo on the Hub")
84
+ repo_files_parser.add_argument(
85
+ "repo_id", type=str, help="The ID of the repo to manage (e.g. `username/repo-name`)."
86
+ )
87
+ repo_files_subparsers = repo_files_parser.add_subparsers(
88
+ help="Action to execute against the files.",
89
+ required=True,
90
+ )
91
+ delete_subparser = repo_files_subparsers.add_parser(
92
+ "delete",
93
+ help="Delete files from a repo on the Hub",
94
+ )
95
+ delete_subparser.set_defaults(func=lambda args: DeleteFilesSubCommand(args))
96
+ delete_subparser.add_argument(
97
+ "patterns",
98
+ nargs="+",
99
+ type=str,
100
+ help="Glob patterns to match files to delete.",
101
+ )
102
+ delete_subparser.add_argument(
103
+ "--repo-type",
104
+ choices=["model", "dataset", "space"],
105
+ default="model",
106
+ help="Type of the repo to upload to (e.g. `dataset`).",
107
+ )
108
+ delete_subparser.add_argument(
109
+ "--revision",
110
+ type=str,
111
+ help=(
112
+ "An optional Git revision to push to. It can be a branch name "
113
+ "or a PR reference. If revision does not"
114
+ " exist and `--create-pr` is not set, a branch will be automatically created."
115
+ ),
116
+ )
117
+ delete_subparser.add_argument(
118
+ "--commit-message", type=str, help="The summary / title / first line of the generated commit."
119
+ )
120
+ delete_subparser.add_argument(
121
+ "--commit-description", type=str, help="The description of the generated commit."
122
+ )
123
+ delete_subparser.add_argument(
124
+ "--create-pr", action="store_true", help="Whether to create a new Pull Request for these changes."
125
+ )
126
+ repo_files_parser.add_argument(
127
+ "--token",
128
+ type=str,
129
+ help="A User Access Token generated from https://huggingface.co/settings/tokens",
130
+ )
131
+
132
+ repo_files_parser.set_defaults(func=RepoFilesCommand)
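A hedged sketch of the `HfApi.delete_files` call behind `repo-files <repo_id> delete`; the repo ID and glob patterns are placeholders:

```py
from huggingface_hub import HfApi

api = HfApi()
# Roughly equivalent to: huggingface-cli repo-files username/my-model delete "*.json" "logs/"
commit_info = api.delete_files(
    repo_id="username/my-model",          # placeholder
    delete_patterns=["*.json", "logs/"],  # placeholder patterns
    repo_type="model",
    commit_message="Remove generated artifacts",
)
print(commit_info)
```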
phivenv/Lib/site-packages/huggingface_hub/commands/scan_cache.py ADDED
@@ -0,0 +1,183 @@
1
+ # coding=utf-8
2
+ # Copyright 2022-present, the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Contains command to scan the HF cache directory.
16
+
17
+ Usage:
18
+ huggingface-cli scan-cache
19
+ huggingface-cli scan-cache -v
20
+ huggingface-cli scan-cache -vvv
21
+ huggingface-cli scan-cache --dir ~/.cache/huggingface/hub
22
+ """
23
+
24
+ import time
25
+ from argparse import Namespace, _SubParsersAction
26
+ from typing import Optional
27
+
28
+ from ..utils import CacheNotFound, HFCacheInfo, scan_cache_dir
29
+ from . import BaseHuggingfaceCLICommand
30
+ from ._cli_utils import ANSI, show_deprecation_warning, tabulate
31
+
32
+
33
+ class ScanCacheCommand(BaseHuggingfaceCLICommand):
34
+ @staticmethod
35
+ def register_subcommand(parser: _SubParsersAction):
36
+ scan_cache_parser = parser.add_parser("scan-cache", help="Scan cache directory.")
37
+
38
+ scan_cache_parser.add_argument(
39
+ "--dir",
40
+ type=str,
41
+ default=None,
42
+ help="cache directory to scan (optional). Default to the default HuggingFace cache.",
43
+ )
44
+ scan_cache_parser.add_argument(
45
+ "-v",
46
+ "--verbose",
47
+ action="count",
48
+ default=0,
49
+ help="show a more verbose output",
50
+ )
51
+ scan_cache_parser.set_defaults(func=ScanCacheCommand)
52
+
53
+ def __init__(self, args: Namespace) -> None:
54
+ self.verbosity: int = args.verbose
55
+ self.cache_dir: Optional[str] = args.dir
56
+
57
+ def run(self):
58
+ show_deprecation_warning("huggingface-cli scan-cache", "hf cache scan")
59
+
60
+ try:
61
+ t0 = time.time()
62
+ hf_cache_info = scan_cache_dir(self.cache_dir)
63
+ t1 = time.time()
64
+ except CacheNotFound as exc:
65
+ cache_dir = exc.cache_dir
66
+ print(f"Cache directory not found: {cache_dir}")
67
+ return
68
+
69
+ self._print_hf_cache_info_as_table(hf_cache_info)
70
+
71
+ print(
72
+ f"\nDone in {round(t1 - t0, 1)}s. Scanned {len(hf_cache_info.repos)} repo(s)"
73
+ f" for a total of {ANSI.red(hf_cache_info.size_on_disk_str)}."
74
+ )
75
+ if len(hf_cache_info.warnings) > 0:
76
+ message = f"Got {len(hf_cache_info.warnings)} warning(s) while scanning."
77
+ if self.verbosity >= 3:
78
+ print(ANSI.gray(message))
79
+ for warning in hf_cache_info.warnings:
80
+ print(ANSI.gray(warning))
81
+ else:
82
+ print(ANSI.gray(message + " Use -vvv to print details."))
83
+
84
+ def _print_hf_cache_info_as_table(self, hf_cache_info: HFCacheInfo) -> None:
85
+ print(get_table(hf_cache_info, verbosity=self.verbosity))
86
+
87
+
88
+ def get_table(hf_cache_info: HFCacheInfo, *, verbosity: int = 0) -> str:
89
+ """Generate a table from the [`HFCacheInfo`] object.
90
+
91
+ Pass `verbosity=0` to get a table with a single row per repo, with columns
92
+ "repo_id", "repo_type", "size_on_disk", "nb_files", "last_accessed", "last_modified", "refs", "local_path".
93
+
94
+ Pass `verbosity=1` to get a table with a row per repo and revision (thus multiple rows can appear for a single repo), with columns
95
+ "repo_id", "repo_type", "revision", "size_on_disk", "nb_files", "last_modified", "refs", "local_path".
96
+
97
+ Example:
98
+ ```py
99
+ >>> from huggingface_hub.utils import scan_cache_dir
100
+ >>> from huggingface_hub.commands.scan_cache import get_table
101
+
102
+ >>> hf_cache_info = scan_cache_dir()
103
+ HFCacheInfo(...)
104
+
105
+ >>> print(get_table(hf_cache_info, verbosity=0))
106
+ REPO ID REPO TYPE SIZE ON DISK NB FILES LAST_ACCESSED LAST_MODIFIED REFS LOCAL PATH
107
+ --------------------------------------------------- --------- ------------ -------- ------------- ------------- ---- --------------------------------------------------------------------------------------------------
108
+ roberta-base model 2.7M 5 1 day ago 1 week ago main C:\\Users\\admin\\.cache\\huggingface\\hub\\models--roberta-base
109
+ suno/bark model 8.8K 1 1 week ago 1 week ago main C:\\Users\\admin\\.cache\\huggingface\\hub\\models--suno--bark
110
+ t5-base model 893.8M 4 4 days ago 7 months ago main C:\\Users\\admin\\.cache\\huggingface\\hub\\models--t5-base
111
+ t5-large model 3.0G 4 5 weeks ago 5 months ago main C:\\Users\\admin\\.cache\\huggingface\\hub\\models--t5-large
112
+
113
+ >>> print(get_table(hf_cache_info, verbosity=1))
114
+ REPO ID REPO TYPE REVISION SIZE ON DISK NB FILES LAST_MODIFIED REFS LOCAL PATH
115
+ --------------------------------------------------- --------- ---------------------------------------- ------------ -------- ------------- ---- -----------------------------------------------------------------------------------------------------------------------------------------------------
116
+ roberta-base model e2da8e2f811d1448a5b465c236feacd80ffbac7b 2.7M 5 1 week ago main C:\\Users\\admin\\.cache\\huggingface\\hub\\models--roberta-base\\snapshots\\e2da8e2f811d1448a5b465c236feacd80ffbac7b
117
+ suno/bark model 70a8a7d34168586dc5d028fa9666aceade177992 8.8K 1 1 week ago main C:\\Users\\admin\\.cache\\huggingface\\hub\\models--suno--bark\\snapshots\\70a8a7d34168586dc5d028fa9666aceade177992
118
+ t5-base model a9723ea7f1b39c1eae772870f3b547bf6ef7e6c1 893.8M 4 7 months ago main C:\\Users\\admin\\.cache\\huggingface\\hub\\models--t5-base\\snapshots\\a9723ea7f1b39c1eae772870f3b547bf6ef7e6c1
119
+ t5-large model 150ebc2c4b72291e770f58e6057481c8d2ed331a 3.0G 4 5 months ago main C:\\Users\\admin\\.cache\\huggingface\\hub\\models--t5-large\\snapshots\\150ebc2c4b72291e770f58e6057481c8d2ed331a
120
+ ```
121
+
122
+ Args:
123
+ hf_cache_info ([`HFCacheInfo`]):
124
+ The HFCacheInfo object to print.
125
+ verbosity (`int`, *optional*):
126
+ The verbosity level. Defaults to 0.
127
+
128
+ Returns:
129
+ `str`: The table as a string.
130
+ """
131
+ if verbosity == 0:
132
+ return tabulate(
133
+ rows=[
134
+ [
135
+ repo.repo_id,
136
+ repo.repo_type,
137
+ "{:>12}".format(repo.size_on_disk_str),
138
+ repo.nb_files,
139
+ repo.last_accessed_str,
140
+ repo.last_modified_str,
141
+ ", ".join(sorted(repo.refs)),
142
+ str(repo.repo_path),
143
+ ]
144
+ for repo in sorted(hf_cache_info.repos, key=lambda repo: repo.repo_path)
145
+ ],
146
+ headers=[
147
+ "REPO ID",
148
+ "REPO TYPE",
149
+ "SIZE ON DISK",
150
+ "NB FILES",
151
+ "LAST_ACCESSED",
152
+ "LAST_MODIFIED",
153
+ "REFS",
154
+ "LOCAL PATH",
155
+ ],
156
+ )
157
+ else:
158
+ return tabulate(
159
+ rows=[
160
+ [
161
+ repo.repo_id,
162
+ repo.repo_type,
163
+ revision.commit_hash,
164
+ "{:>12}".format(revision.size_on_disk_str),
165
+ revision.nb_files,
166
+ revision.last_modified_str,
167
+ ", ".join(sorted(revision.refs)),
168
+ str(revision.snapshot_path),
169
+ ]
170
+ for repo in sorted(hf_cache_info.repos, key=lambda repo: repo.repo_path)
171
+ for revision in sorted(repo.revisions, key=lambda revision: revision.commit_hash)
172
+ ],
173
+ headers=[
174
+ "REPO ID",
175
+ "REPO TYPE",
176
+ "REVISION",
177
+ "SIZE ON DISK",
178
+ "NB FILES",
179
+ "LAST_MODIFIED",
180
+ "REFS",
181
+ "LOCAL PATH",
182
+ ],
183
+ )
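The same cache report can be produced programmatically; a short sketch using `scan_cache_dir` together with the `get_table` helper defined above:

```py
from huggingface_hub import scan_cache_dir
from huggingface_hub.commands.scan_cache import get_table

info = scan_cache_dir()  # raises CacheNotFound if the cache directory does not exist
print(get_table(info, verbosity=0))
print(f"Total size on disk: {info.size_on_disk_str}")
```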
phivenv/Lib/site-packages/huggingface_hub/commands/tag.py ADDED
@@ -0,0 +1,161 @@
1
+ # coding=utf-8
2
+ # Copyright 2024-present, the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """Contains commands to perform tag management with the CLI.
17
+
18
+ Usage Examples:
19
+ - Create a tag:
20
+ $ huggingface-cli tag user/my-model 1.0 --message "First release"
21
+ $ huggingface-cli tag user/my-model 1.0 -m "First release" --revision develop
22
+ $ huggingface-cli tag user/my-dataset 1.0 -m "First release" --repo-type dataset
23
+ $ huggingface-cli tag user/my-space 1.0
24
+ - List all tags:
25
+ $ huggingface-cli tag -l user/my-model
26
+ $ huggingface-cli tag --list user/my-dataset --repo-type dataset
27
+ - Delete a tag:
28
+ $ huggingface-cli tag -d user/my-model 1.0
29
+ $ huggingface-cli tag --delete user/my-dataset 1.0 --repo-type dataset
30
+ $ huggingface-cli tag -d user/my-space 1.0 -y
31
+ """
32
+
33
+ from argparse import Namespace, _SubParsersAction
34
+
35
+ from requests.exceptions import HTTPError
36
+
37
+ from huggingface_hub.commands import BaseHuggingfaceCLICommand
38
+ from huggingface_hub.constants import (
39
+ REPO_TYPES,
40
+ )
41
+ from huggingface_hub.hf_api import HfApi
42
+
43
+ from ..errors import HfHubHTTPError, RepositoryNotFoundError, RevisionNotFoundError
44
+ from ._cli_utils import ANSI, show_deprecation_warning
45
+
46
+
47
+ class TagCommands(BaseHuggingfaceCLICommand):
48
+ @staticmethod
49
+ def register_subcommand(parser: _SubParsersAction):
50
+ tag_parser = parser.add_parser("tag", help="(create, list, delete) tags for a repo in the hub")
51
+
52
+ tag_parser.add_argument("repo_id", type=str, help="The ID of the repo to tag (e.g. `username/repo-name`).")
53
+ tag_parser.add_argument("tag", nargs="?", type=str, help="The name of the tag for creation or deletion.")
54
+ tag_parser.add_argument("-m", "--message", type=str, help="The description of the tag to create.")
55
+ tag_parser.add_argument("--revision", type=str, help="The git revision to tag.")
56
+ tag_parser.add_argument(
57
+ "--token", type=str, help="A User Access Token generated from https://huggingface.co/settings/tokens."
58
+ )
59
+ tag_parser.add_argument(
60
+ "--repo-type",
61
+ choices=["model", "dataset", "space"],
62
+ default="model",
63
+ help="Set the type of repository (model, dataset, or space).",
64
+ )
65
+ tag_parser.add_argument("-y", "--yes", action="store_true", help="Answer Yes to prompts automatically.")
66
+
67
+ tag_parser.add_argument("-l", "--list", action="store_true", help="List tags for a repository.")
68
+ tag_parser.add_argument("-d", "--delete", action="store_true", help="Delete a tag for a repository.")
69
+
70
+ tag_parser.set_defaults(func=lambda args: handle_commands(args))
71
+
72
+
73
+ def handle_commands(args: Namespace):
74
+ show_deprecation_warning("huggingface-cli tag", "hf repo tag")
75
+
76
+ if args.list:
77
+ return TagListCommand(args)
78
+ elif args.delete:
79
+ return TagDeleteCommand(args)
80
+ else:
81
+ return TagCreateCommand(args)
82
+
83
+
84
+ class TagCommand:
85
+ def __init__(self, args: Namespace):
86
+ self.args = args
87
+ self.api = HfApi(token=self.args.token)
88
+ self.repo_id = self.args.repo_id
89
+ self.repo_type = self.args.repo_type
90
+ if self.repo_type not in REPO_TYPES:
91
+ print("Invalid repo --repo-type")
92
+ exit(1)
93
+
94
+
95
+ class TagCreateCommand(TagCommand):
96
+ def run(self):
97
+ print(f"You are about to create tag {ANSI.bold(self.args.tag)} on {self.repo_type} {ANSI.bold(self.repo_id)}")
98
+
99
+ try:
100
+ self.api.create_tag(
101
+ repo_id=self.repo_id,
102
+ tag=self.args.tag,
103
+ tag_message=self.args.message,
104
+ revision=self.args.revision,
105
+ repo_type=self.repo_type,
106
+ )
107
+ except RepositoryNotFoundError:
108
+ print(f"{self.repo_type.capitalize()} {ANSI.bold(self.repo_id)} not found.")
109
+ exit(1)
110
+ except RevisionNotFoundError:
111
+ print(f"Revision {ANSI.bold(self.args.revision)} not found.")
112
+ exit(1)
113
+ except HfHubHTTPError as e:
114
+ if e.response.status_code == 409:
115
+ print(f"Tag {ANSI.bold(self.args.tag)} already exists on {ANSI.bold(self.repo_id)}")
116
+ exit(1)
117
+ raise e
118
+
119
+ print(f"Tag {ANSI.bold(self.args.tag)} created on {ANSI.bold(self.repo_id)}")
120
+
121
+
122
+ class TagListCommand(TagCommand):
123
+ def run(self):
124
+ try:
125
+ refs = self.api.list_repo_refs(
126
+ repo_id=self.repo_id,
127
+ repo_type=self.repo_type,
128
+ )
129
+ except RepositoryNotFoundError:
130
+ print(f"{self.repo_type.capitalize()} {ANSI.bold(self.repo_id)} not found.")
131
+ exit(1)
132
+ except HTTPError as e:
133
+ print(e)
134
+ print(ANSI.red(e.response.text))
135
+ exit(1)
136
+ if len(refs.tags) == 0:
137
+ print("No tags found")
138
+ exit(0)
139
+ print(f"Tags for {self.repo_type} {ANSI.bold(self.repo_id)}:")
140
+ for tag in refs.tags:
141
+ print(tag.name)
142
+
143
+
144
+ class TagDeleteCommand(TagCommand):
145
+ def run(self):
146
+ print(f"You are about to delete tag {ANSI.bold(self.args.tag)} on {self.repo_type} {ANSI.bold(self.repo_id)}")
147
+
148
+ if not self.args.yes:
149
+ choice = input("Proceed? [Y/n] ").lower()
150
+ if choice not in ("", "y", "yes"):
151
+ print("Abort")
152
+ exit()
153
+ try:
154
+ self.api.delete_tag(repo_id=self.repo_id, tag=self.args.tag, repo_type=self.repo_type)
155
+ except RepositoryNotFoundError:
156
+ print(f"{self.repo_type.capitalize()} {ANSI.bold(self.repo_id)} not found.")
157
+ exit(1)
158
+ except RevisionNotFoundError:
159
+ print(f"Tag {ANSI.bold(self.args.tag)} not found on {ANSI.bold(self.repo_id)}")
160
+ exit(1)
161
+ print(f"Tag {ANSI.bold(self.args.tag)} deleted on {ANSI.bold(self.repo_id)}")
phivenv/Lib/site-packages/huggingface_hub/commands/upload.py ADDED
@@ -0,0 +1,318 @@
1
+ # coding=utf-8
2
+ # Copyright 2023-present, the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Contains command to upload a repo or file with the CLI.
16
+
17
+ Usage:
18
+ # Upload file (implicit)
19
+ huggingface-cli upload my-cool-model ./my-cool-model.safetensors
20
+
21
+ # Upload file (explicit)
22
+ huggingface-cli upload my-cool-model ./my-cool-model.safetensors model.safetensors
23
+
24
+ # Upload directory (implicit). If `my-cool-model/` is a directory it will be uploaded, otherwise an exception is raised.
25
+ huggingface-cli upload my-cool-model
26
+
27
+ # Upload directory (explicit)
28
+ huggingface-cli upload my-cool-model ./models/my-cool-model .
29
+
30
+ # Upload filtered directory (example: tensorboard logs except for the last run)
31
+ huggingface-cli upload my-cool-model ./model/training /logs --include "*.tfevents.*" --exclude "*20230905*"
32
+
33
+ # Upload with wildcard
34
+ huggingface-cli upload my-cool-model "./model/training/*.safetensors"
35
+
36
+ # Upload private dataset
37
+ huggingface-cli upload Wauplin/my-cool-dataset ./data . --repo-type=dataset --private
38
+
39
+ # Upload with token
40
+ huggingface-cli upload Wauplin/my-cool-model --token=hf_****
41
+
42
+ # Sync local Space with Hub (upload new files, delete removed files)
43
+ huggingface-cli upload Wauplin/space-example --repo-type=space --exclude="/logs/*" --delete="*" --commit-message="Sync local Space with Hub"
44
+
45
+ # Schedule commits every 30 minutes
46
+ huggingface-cli upload Wauplin/my-cool-model --every=30
47
+ """
48
+
49
+ import os
50
+ import time
51
+ import warnings
52
+ from argparse import Namespace, _SubParsersAction
53
+ from typing import List, Optional
54
+
55
+ from huggingface_hub import logging
56
+ from huggingface_hub._commit_scheduler import CommitScheduler
57
+ from huggingface_hub.commands import BaseHuggingfaceCLICommand
58
+ from huggingface_hub.constants import HF_HUB_ENABLE_HF_TRANSFER
59
+ from huggingface_hub.errors import RevisionNotFoundError
60
+ from huggingface_hub.hf_api import HfApi
61
+ from huggingface_hub.utils import disable_progress_bars, enable_progress_bars
62
+ from huggingface_hub.utils._runtime import is_xet_available
63
+
64
+ from ._cli_utils import show_deprecation_warning
65
+
66
+
67
+ logger = logging.get_logger(__name__)
68
+
69
+
70
+ class UploadCommand(BaseHuggingfaceCLICommand):
71
+ @staticmethod
72
+ def register_subcommand(parser: _SubParsersAction):
73
+ upload_parser = parser.add_parser("upload", help="Upload a file or a folder to a repo on the Hub")
74
+ upload_parser.add_argument(
75
+ "repo_id", type=str, help="The ID of the repo to upload to (e.g. `username/repo-name`)."
76
+ )
77
+ upload_parser.add_argument(
78
+ "local_path",
79
+ nargs="?",
80
+ help="Local path to the file or folder to upload. Wildcard patterns are supported. Defaults to current directory.",
81
+ )
82
+ upload_parser.add_argument(
83
+ "path_in_repo",
84
+ nargs="?",
85
+ help="Path of the file or folder in the repo. Defaults to the relative path of the file or folder.",
86
+ )
87
+ upload_parser.add_argument(
88
+ "--repo-type",
89
+ choices=["model", "dataset", "space"],
90
+ default="model",
91
+ help="Type of the repo to upload to (e.g. `dataset`).",
92
+ )
93
+ upload_parser.add_argument(
94
+ "--revision",
95
+ type=str,
96
+ help=(
97
+ "An optional Git revision to push to. It can be a branch name or a PR reference. If revision does not"
98
+ " exist and `--create-pr` is not set, a branch will be automatically created."
99
+ ),
100
+ )
101
+ upload_parser.add_argument(
102
+ "--private",
103
+ action="store_true",
104
+ help=(
105
+ "Whether to create a private repo if repo doesn't exist on the Hub. Ignored if the repo already"
106
+ " exists."
107
+ ),
108
+ )
109
+ upload_parser.add_argument("--include", nargs="*", type=str, help="Glob patterns to match files to upload.")
110
+ upload_parser.add_argument(
111
+ "--exclude", nargs="*", type=str, help="Glob patterns to exclude from files to upload."
112
+ )
113
+ upload_parser.add_argument(
114
+ "--delete",
115
+ nargs="*",
116
+ type=str,
117
+ help="Glob patterns for file to be deleted from the repo while committing.",
118
+ )
119
+ upload_parser.add_argument(
120
+ "--commit-message", type=str, help="The summary / title / first line of the generated commit."
121
+ )
122
+ upload_parser.add_argument("--commit-description", type=str, help="The description of the generated commit.")
123
+ upload_parser.add_argument(
124
+ "--create-pr", action="store_true", help="Whether to upload content as a new Pull Request."
125
+ )
126
+ upload_parser.add_argument(
127
+ "--every",
128
+ type=float,
129
+ help="If set, a background job is scheduled to create commits every `every` minutes.",
130
+ )
131
+ upload_parser.add_argument(
132
+ "--token", type=str, help="A User Access Token generated from https://huggingface.co/settings/tokens"
133
+ )
134
+ upload_parser.add_argument(
135
+ "--quiet",
136
+ action="store_true",
137
+ help="If True, progress bars are disabled and only the path to the uploaded files is printed.",
138
+ )
139
+ upload_parser.set_defaults(func=UploadCommand)
140
+
141
+ def __init__(self, args: Namespace) -> None:
142
+ self.repo_id: str = args.repo_id
143
+ self.repo_type: Optional[str] = args.repo_type
144
+ self.revision: Optional[str] = args.revision
145
+ self.private: bool = args.private
146
+
147
+ self.include: Optional[List[str]] = args.include
148
+ self.exclude: Optional[List[str]] = args.exclude
149
+ self.delete: Optional[List[str]] = args.delete
150
+
151
+ self.commit_message: Optional[str] = args.commit_message
152
+ self.commit_description: Optional[str] = args.commit_description
153
+ self.create_pr: bool = args.create_pr
154
+ self.api: HfApi = HfApi(token=args.token, library_name="huggingface-cli")
155
+ self.quiet: bool = args.quiet # disable warnings and progress bars
156
+
157
+ # Check `--every` is valid
158
+ if args.every is not None and args.every <= 0:
159
+ raise ValueError(f"`every` must be a positive value (got '{args.every}')")
160
+ self.every: Optional[float] = args.every
161
+
162
+ # Resolve `local_path` and `path_in_repo`
163
+ repo_name: str = args.repo_id.split("/")[-1] # e.g. "Wauplin/my-cool-model" => "my-cool-model"
164
+ self.local_path: str
165
+ self.path_in_repo: str
166
+
167
+ if args.local_path is not None and any(c in args.local_path for c in ["*", "?", "["]):
168
+ if args.include is not None:
169
+ raise ValueError("Cannot set `--include` when passing a `local_path` containing a wildcard.")
170
+ if args.path_in_repo is not None and args.path_in_repo != ".":
171
+ raise ValueError("Cannot set `path_in_repo` when passing a `local_path` containing a wildcard.")
172
+ self.local_path = "."
173
+ self.include = args.local_path
174
+ self.path_in_repo = "."
175
+ elif args.local_path is None and os.path.isfile(repo_name):
176
+ # Implicit case 1: user provided only a repo_id which happen to be a local file as well => upload it with same name
177
+ self.local_path = repo_name
178
+ self.path_in_repo = repo_name
179
+ elif args.local_path is None and os.path.isdir(repo_name):
180
+ # Implicit case 2: user provided only a repo_id which happen to be a local folder as well => upload it at root
181
+ self.local_path = repo_name
182
+ self.path_in_repo = "."
183
+ elif args.local_path is None:
184
+ # Implicit case 3: user provided only a repo_id that does not match a local file or folder
185
+ # => the user must explicitly provide a local_path => raise exception
186
+ raise ValueError(f"'{repo_name}' is not a local file or folder. Please set `local_path` explicitly.")
187
+ elif args.path_in_repo is None and os.path.isfile(args.local_path):
188
+ # Explicit local path to file, no path in repo => upload it at root with same name
189
+ self.local_path = args.local_path
190
+ self.path_in_repo = os.path.basename(args.local_path)
191
+ elif args.path_in_repo is None:
192
+ # Explicit local path to folder, no path in repo => upload at root
193
+ self.local_path = args.local_path
194
+ self.path_in_repo = "."
195
+ else:
196
+ # Finally, if both paths are explicit
197
+ self.local_path = args.local_path
198
+ self.path_in_repo = args.path_in_repo
199
+
200
+ def run(self) -> None:
201
+ show_deprecation_warning("huggingface-cli upload", "hf upload")
202
+
203
+ if self.quiet:
204
+ disable_progress_bars()
205
+ with warnings.catch_warnings():
206
+ warnings.simplefilter("ignore")
207
+ print(self._upload())
208
+ enable_progress_bars()
209
+ else:
210
+ logging.set_verbosity_info()
211
+ print(self._upload())
212
+ logging.set_verbosity_warning()
213
+
214
+ def _upload(self) -> str:
215
+ if os.path.isfile(self.local_path):
216
+ if self.include is not None and len(self.include) > 0:
217
+ warnings.warn("Ignoring `--include` since a single file is uploaded.")
218
+ if self.exclude is not None and len(self.exclude) > 0:
219
+ warnings.warn("Ignoring `--exclude` since a single file is uploaded.")
220
+ if self.delete is not None and len(self.delete) > 0:
221
+ warnings.warn("Ignoring `--delete` since a single file is uploaded.")
222
+
223
+ if not is_xet_available() and not HF_HUB_ENABLE_HF_TRANSFER:
224
+ logger.info(
225
+ "Consider using `hf_transfer` for faster uploads. This solution comes with some limitations. See"
226
+ " https://huggingface.co/docs/huggingface_hub/hf_transfer for more details."
227
+ )
228
+
229
+ # Schedule commits if `every` is set
230
+ if self.every is not None:
231
+ if os.path.isfile(self.local_path):
232
+ # If file => watch entire folder + use allow_patterns
233
+ folder_path = os.path.dirname(self.local_path)
234
+ path_in_repo = (
235
+ self.path_in_repo[: -len(self.local_path)] # remove filename from path_in_repo
236
+ if self.path_in_repo.endswith(self.local_path)
237
+ else self.path_in_repo
238
+ )
239
+ allow_patterns = [self.local_path]
240
+ ignore_patterns = []
241
+ else:
242
+ folder_path = self.local_path
243
+ path_in_repo = self.path_in_repo
244
+ allow_patterns = self.include or []
245
+ ignore_patterns = self.exclude or []
246
+ if self.delete is not None and len(self.delete) > 0:
247
+ warnings.warn("Ignoring `--delete` when uploading with scheduled commits.")
248
+
249
+ scheduler = CommitScheduler(
250
+ folder_path=folder_path,
251
+ repo_id=self.repo_id,
252
+ repo_type=self.repo_type,
253
+ revision=self.revision,
254
+ allow_patterns=allow_patterns,
255
+ ignore_patterns=ignore_patterns,
256
+ path_in_repo=path_in_repo,
257
+ private=self.private,
258
+ every=self.every,
259
+ hf_api=self.api,
260
+ )
261
+ print(f"Scheduling commits every {self.every} minutes to {scheduler.repo_id}.")
262
+ try: # Block main thread until KeyboardInterrupt
263
+ while True:
264
+ time.sleep(100)
265
+ except KeyboardInterrupt:
266
+ scheduler.stop()
267
+ return "Stopped scheduled commits."
268
+
269
+ # Otherwise, create repo and proceed with the upload
270
+ if not os.path.isfile(self.local_path) and not os.path.isdir(self.local_path):
271
+ raise FileNotFoundError(f"No such file or directory: '{self.local_path}'.")
272
+ repo_id = self.api.create_repo(
273
+ repo_id=self.repo_id,
274
+ repo_type=self.repo_type,
275
+ exist_ok=True,
276
+ private=self.private,
277
+ space_sdk="gradio" if self.repo_type == "space" else None,
278
+ # ^ We don't want it to fail when uploading to a Space => let's set Gradio by default.
279
+ # ^ I'd rather not add CLI args to set it explicitly as we already have `huggingface-cli repo create` for that.
280
+ ).repo_id
281
+
282
+ # Check if branch already exists and if not, create it
283
+ if self.revision is not None and not self.create_pr:
284
+ try:
285
+ self.api.repo_info(repo_id=repo_id, repo_type=self.repo_type, revision=self.revision)
286
+ except RevisionNotFoundError:
287
+ logger.info(f"Branch '{self.revision}' not found. Creating it...")
288
+ self.api.create_branch(repo_id=repo_id, repo_type=self.repo_type, branch=self.revision, exist_ok=True)
289
+ # ^ `exist_ok=True` to avoid race concurrency issues
290
+
291
+ # File-based upload
292
+ if os.path.isfile(self.local_path):
293
+ return self.api.upload_file(
294
+ path_or_fileobj=self.local_path,
295
+ path_in_repo=self.path_in_repo,
296
+ repo_id=repo_id,
297
+ repo_type=self.repo_type,
298
+ revision=self.revision,
299
+ commit_message=self.commit_message,
300
+ commit_description=self.commit_description,
301
+ create_pr=self.create_pr,
302
+ )
303
+
304
+ # Folder-based upload
305
+ else:
306
+ return self.api.upload_folder(
307
+ folder_path=self.local_path,
308
+ path_in_repo=self.path_in_repo,
309
+ repo_id=repo_id,
310
+ repo_type=self.repo_type,
311
+ revision=self.revision,
312
+ commit_message=self.commit_message,
313
+ commit_description=self.commit_description,
314
+ create_pr=self.create_pr,
315
+ allow_patterns=self.include,
316
+ ignore_patterns=self.exclude,
317
+ delete_patterns=self.delete,
318
+ )
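For reference, a sketch of the `upload_file` / `upload_folder` calls that `_upload()` dispatches to; paths and repo ID below are placeholders:

```py
from huggingface_hub import HfApi

api = HfApi()
repo_id = api.create_repo("username/my-model", exist_ok=True).repo_id  # placeholder

# Single file, like: huggingface-cli upload username/my-model ./my-model.safetensors model.safetensors
api.upload_file(
    path_or_fileobj="./my-model.safetensors",
    path_in_repo="model.safetensors",
    repo_id=repo_id,
)

# Folder with include/exclude globs, mirroring the --include / --exclude CLI options
api.upload_folder(
    folder_path="./checkpoints",
    path_in_repo=".",
    repo_id=repo_id,
    allow_patterns=["*.safetensors"],
    ignore_patterns=["*.tmp"],
)
```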
phivenv/Lib/site-packages/huggingface_hub/commands/upload_large_folder.py ADDED
@@ -0,0 +1,131 @@
1
+ # coding=utf-8
2
+ # Copyright 2023-present, the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Contains command to upload a large folder with the CLI."""
16
+
17
+ import os
18
+ from argparse import Namespace, _SubParsersAction
19
+ from typing import List, Optional
20
+
21
+ from huggingface_hub import logging
22
+ from huggingface_hub.commands import BaseHuggingfaceCLICommand
23
+ from huggingface_hub.hf_api import HfApi
24
+ from huggingface_hub.utils import disable_progress_bars
25
+
26
+ from ._cli_utils import ANSI, show_deprecation_warning
27
+
28
+
29
+ logger = logging.get_logger(__name__)
30
+
31
+
32
+ class UploadLargeFolderCommand(BaseHuggingfaceCLICommand):
33
+ @staticmethod
34
+ def register_subcommand(parser: _SubParsersAction):
35
+ subparser = parser.add_parser("upload-large-folder", help="Upload a large folder to a repo on the Hub")
36
+ subparser.add_argument(
37
+ "repo_id", type=str, help="The ID of the repo to upload to (e.g. `username/repo-name`)."
38
+ )
39
+ subparser.add_argument("local_path", type=str, help="Local path to the file or folder to upload.")
40
+ subparser.add_argument(
41
+ "--repo-type",
42
+ choices=["model", "dataset", "space"],
43
+ help="Type of the repo to upload to (e.g. `dataset`).",
44
+ )
45
+ subparser.add_argument(
46
+ "--revision",
47
+ type=str,
48
+ help=("An optional Git revision to push to. It can be a branch name or a PR reference."),
49
+ )
50
+ subparser.add_argument(
51
+ "--private",
52
+ action="store_true",
53
+ help=(
54
+ "Whether to create a private repo if repo doesn't exist on the Hub. Ignored if the repo already exists."
55
+ ),
56
+ )
57
+ subparser.add_argument("--include", nargs="*", type=str, help="Glob patterns to match files to upload.")
58
+ subparser.add_argument("--exclude", nargs="*", type=str, help="Glob patterns to exclude from files to upload.")
59
+ subparser.add_argument(
60
+ "--token", type=str, help="A User Access Token generated from https://huggingface.co/settings/tokens"
61
+ )
62
+ subparser.add_argument(
63
+ "--num-workers", type=int, help="Number of workers to use to hash, upload and commit files."
64
+ )
65
+ subparser.add_argument("--no-report", action="store_true", help="Whether to disable regular status report.")
66
+ subparser.add_argument("--no-bars", action="store_true", help="Whether to disable progress bars.")
67
+ subparser.set_defaults(func=UploadLargeFolderCommand)
68
+
69
+ def __init__(self, args: Namespace) -> None:
70
+ self.repo_id: str = args.repo_id
71
+ self.local_path: str = args.local_path
72
+ self.repo_type: str = args.repo_type
73
+ self.revision: Optional[str] = args.revision
74
+ self.private: bool = args.private
75
+
76
+ self.include: Optional[List[str]] = args.include
77
+ self.exclude: Optional[List[str]] = args.exclude
78
+
79
+ self.api: HfApi = HfApi(token=args.token, library_name="huggingface-cli")
80
+
81
+ self.num_workers: Optional[int] = args.num_workers
82
+ self.no_report: bool = args.no_report
83
+ self.no_bars: bool = args.no_bars
84
+
85
+ if not os.path.isdir(self.local_path):
86
+ raise ValueError("Large upload is only supported for folders.")
87
+
88
+ def run(self) -> None:
89
+ show_deprecation_warning("huggingface-cli upload-large-folder", "hf upload-large-folder")
90
+
91
+ logging.set_verbosity_info()
92
+
93
+ print(
94
+ ANSI.yellow(
95
+ "You are about to upload a large folder to the Hub using `huggingface-cli upload-large-folder`. "
96
+ "This is a new feature so feedback is very welcome!\n"
97
+ "\n"
98
+ "A few things to keep in mind:\n"
99
+ " - Repository limits still apply: https://huggingface.co/docs/hub/repositories-recommendations\n"
100
+ " - Do not start several processes in parallel.\n"
101
+ " - You can interrupt and resume the process at any time. "
102
+ "The script will pick up where it left off except for partially uploaded files that would have to be entirely reuploaded.\n"
103
+ " - Do not upload the same folder to several repositories. If you need to do so, you must delete the `./.cache/huggingface/` folder first.\n"
104
+ "\n"
105
+ f"Some temporary metadata will be stored under `{self.local_path}/.cache/huggingface`.\n"
106
+ " - You must not modify those files manually.\n"
107
+ " - You must not delete the `./.cache/huggingface/` folder while a process is running.\n"
108
+ " - You can delete the `./.cache/huggingface/` folder to reinitialize the upload state when process is not running. Files will have to be hashed and preuploaded again, except for already committed files.\n"
109
+ "\n"
110
+ "If the process output is too verbose, you can disable the progress bars with `--no-bars`. "
111
+ "You can also entirely disable the status report with `--no-report`.\n"
112
+ "\n"
113
+ "For more details, run `huggingface-cli upload-large-folder --help` or check the documentation at "
114
+ "https://huggingface.co/docs/huggingface_hub/guides/upload#upload-a-large-folder."
115
+ )
116
+ )
117
+
118
+ if self.no_bars:
119
+ disable_progress_bars()
120
+
121
+ self.api.upload_large_folder(
122
+ repo_id=self.repo_id,
123
+ folder_path=self.local_path,
124
+ repo_type=self.repo_type,
125
+ revision=self.revision,
126
+ private=self.private,
127
+ allow_patterns=self.include,
128
+ ignore_patterns=self.exclude,
129
+ num_workers=self.num_workers,
130
+ print_report=not self.no_report,
131
+ )
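A sketch of the resumable `upload_large_folder` call this command wraps; the values below are placeholders:

```py
from huggingface_hub import HfApi

api = HfApi()
# Roughly equivalent to: huggingface-cli upload-large-folder username/my-large-dataset ./data --repo-type=dataset
api.upload_large_folder(
    repo_id="username/my-large-dataset",  # placeholder
    folder_path="./data",                 # placeholder
    repo_type="dataset",
    num_workers=8,       # optional; tune to the machine
    print_report=True,   # periodic status report, as in the CLI
)
```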
phivenv/Lib/site-packages/huggingface_hub/commands/user.py ADDED
@@ -0,0 +1,208 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """Contains commands to authenticate to the Hugging Face Hub and interact with your repositories.
15
+
16
+ Usage:
17
+ # login and save token locally.
18
+ huggingface-cli login --token=hf_*** --add-to-git-credential
19
+
20
+ # switch between tokens
21
+ huggingface-cli auth switch
22
+
23
+ # list all tokens
24
+ huggingface-cli auth list
25
+
26
+ # logout from a specific token, if no token-name is provided, all tokens will be deleted from your machine.
27
+ huggingface-cli logout --token-name=your_token_name
28
+
29
+ # find out which huggingface.co account you are logged in as
30
+ huggingface-cli whoami
31
+ """
32
+
33
+ from argparse import _SubParsersAction
34
+ from typing import List, Optional
35
+
36
+ from requests.exceptions import HTTPError
37
+
38
+ from huggingface_hub.commands import BaseHuggingfaceCLICommand
39
+ from huggingface_hub.constants import ENDPOINT
40
+ from huggingface_hub.hf_api import HfApi
41
+
42
+ from .._login import auth_list, auth_switch, login, logout
43
+ from ..utils import get_stored_tokens, get_token, logging
44
+ from ._cli_utils import ANSI, show_deprecation_warning
45
+
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+ try:
50
+ from InquirerPy import inquirer
51
+ from InquirerPy.base.control import Choice
52
+
53
+ _inquirer_py_available = True
54
+ except ImportError:
55
+ _inquirer_py_available = False
56
+
57
+
58
+ class UserCommands(BaseHuggingfaceCLICommand):
59
+ @staticmethod
60
+ def register_subcommand(parser: _SubParsersAction):
61
+ login_parser = parser.add_parser("login", help="Log in using a token from huggingface.co/settings/tokens")
62
+ login_parser.add_argument(
63
+ "--token",
64
+ type=str,
65
+ help="Token generated from https://huggingface.co/settings/tokens",
66
+ )
67
+ login_parser.add_argument(
68
+ "--add-to-git-credential",
69
+ action="store_true",
70
+ help="Optional: Save token to git credential helper.",
71
+ )
72
+ login_parser.set_defaults(func=lambda args: LoginCommand(args))
73
+ whoami_parser = parser.add_parser("whoami", help="Find out which huggingface.co account you are logged in as.")
74
+ whoami_parser.set_defaults(func=lambda args: WhoamiCommand(args))
75
+
76
+ logout_parser = parser.add_parser("logout", help="Log out")
77
+ logout_parser.add_argument(
78
+ "--token-name",
79
+ type=str,
80
+ help="Optional: Name of the access token to log out from.",
81
+ )
82
+ logout_parser.set_defaults(func=lambda args: LogoutCommand(args))
83
+
84
+ auth_parser = parser.add_parser("auth", help="Other authentication related commands")
85
+ auth_subparsers = auth_parser.add_subparsers(help="Authentication subcommands")
86
+ auth_switch_parser = auth_subparsers.add_parser("switch", help="Switch between access tokens")
87
+ auth_switch_parser.add_argument(
88
+ "--token-name",
89
+ type=str,
90
+ help="Optional: Name of the access token to switch to.",
91
+ )
92
+ auth_switch_parser.add_argument(
93
+ "--add-to-git-credential",
94
+ action="store_true",
95
+ help="Optional: Save token to git credential helper.",
96
+ )
97
+ auth_switch_parser.set_defaults(func=lambda args: AuthSwitchCommand(args))
98
+ auth_list_parser = auth_subparsers.add_parser("list", help="List all stored access tokens")
99
+ auth_list_parser.set_defaults(func=lambda args: AuthListCommand(args))
100
+
101
+
102
+ class BaseUserCommand:
103
+ def __init__(self, args):
104
+ self.args = args
105
+ self._api = HfApi()
106
+
107
+
108
+ class LoginCommand(BaseUserCommand):
109
+ def run(self):
110
+ show_deprecation_warning("huggingface-cli login", "hf auth login")
111
+
112
+ logging.set_verbosity_info()
113
+ login(
114
+ token=self.args.token,
115
+ add_to_git_credential=self.args.add_to_git_credential,
116
+ )
117
+
118
+
119
+ class LogoutCommand(BaseUserCommand):
120
+ def run(self):
121
+ show_deprecation_warning("huggingface-cli logout", "hf auth logout")
122
+
123
+ logging.set_verbosity_info()
124
+ logout(token_name=self.args.token_name)
125
+
126
+
127
+ class AuthSwitchCommand(BaseUserCommand):
128
+ def run(self):
129
+ show_deprecation_warning("huggingface-cli auth switch", "hf auth switch")
130
+
131
+ logging.set_verbosity_info()
132
+ token_name = self.args.token_name
133
+ if token_name is None:
134
+ token_name = self._select_token_name()
135
+
136
+ if token_name is None:
137
+ print("No token name provided. Aborting.")
138
+ exit()
139
+ auth_switch(token_name, add_to_git_credential=self.args.add_to_git_credential)
140
+
141
+ def _select_token_name(self) -> Optional[str]:
142
+ token_names = list(get_stored_tokens().keys())
143
+
144
+ if not token_names:
145
+ logger.error("No stored tokens found. Please login first.")
146
+ return None
147
+
148
+ if _inquirer_py_available:
149
+ return self._select_token_name_tui(token_names)
150
+ # if inquirer is not available, use a simpler terminal UI
151
+ print("Available stored tokens:")
152
+ for i, token_name in enumerate(token_names, 1):
153
+ print(f"{i}. {token_name}")
154
+ while True:
155
+ try:
156
+ choice = input("Enter the number of the token to switch to (or 'q' to quit): ")
157
+ if choice.lower() == "q":
158
+ return None
159
+ index = int(choice) - 1
160
+ if 0 <= index < len(token_names):
161
+ return token_names[index]
162
+ else:
163
+ print("Invalid selection. Please try again.")
164
+ except ValueError:
165
+ print("Invalid input. Please enter a number or 'q' to quit.")
166
+
167
+ def _select_token_name_tui(self, token_names: List[str]) -> Optional[str]:
168
+ choices = [Choice(token_name, name=token_name) for token_name in token_names]
169
+ try:
170
+ return inquirer.select(
171
+ message="Select a token to switch to:",
172
+ choices=choices,
173
+ default=None,
174
+ ).execute()
175
+ except KeyboardInterrupt:
176
+ logger.info("Token selection cancelled.")
177
+ return None
178
+
179
+
180
+ class AuthListCommand(BaseUserCommand):
181
+ def run(self):
182
+ show_deprecation_warning("huggingface-cli auth list", "hf auth list")
183
+
184
+ logging.set_verbosity_info()
185
+ auth_list()
186
+
187
+
188
+ class WhoamiCommand(BaseUserCommand):
189
+ def run(self):
190
+ show_deprecation_warning("huggingface-cli whoami", "hf auth whoami")
191
+
192
+ token = get_token()
193
+ if token is None:
194
+ print("Not logged in")
195
+ exit()
196
+ try:
197
+ info = self._api.whoami(token)
198
+ print(ANSI.bold("user: "), info["name"])
199
+ orgs = [org["name"] for org in info["orgs"]]
200
+ if orgs:
201
+ print(ANSI.bold("orgs: "), ",".join(orgs))
202
+
203
+ if ENDPOINT != "https://huggingface.co":
204
+ print(f"Authenticated through private endpoint: {ENDPOINT}")
205
+ except HTTPError as e:
206
+ print(e)
207
+ print(ANSI.red(e.response.text))
208
+ exit(1)
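A brief sketch of the programmatic equivalents of the auth commands above; the token value is a placeholder:

```py
from huggingface_hub import login, whoami

login(token="hf_xxx")  # placeholder token; same as `huggingface-cli login --token=hf_xxx`
info = whoami()        # account info for the token saved locally
print(info["name"])
```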
phivenv/Lib/site-packages/huggingface_hub/commands/version.py ADDED
@@ -0,0 +1,40 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """Contains command to print information about the version.
15
+
16
+ Usage:
17
+ huggingface-cli version
18
+ """
19
+
20
+ from argparse import _SubParsersAction
21
+
22
+ from huggingface_hub import __version__
23
+
24
+ from . import BaseHuggingfaceCLICommand
25
+ from ._cli_utils import show_deprecation_warning
26
+
27
+
28
+ class VersionCommand(BaseHuggingfaceCLICommand):
29
+ def __init__(self, args):
30
+ self.args = args
31
+
32
+ @staticmethod
33
+ def register_subcommand(parser: _SubParsersAction):
34
+ version_parser = parser.add_parser("version", help="Print information about the huggingface-cli version.")
35
+ version_parser.set_defaults(func=VersionCommand)
36
+
37
+ def run(self) -> None:
38
+ show_deprecation_warning("huggingface-cli version", "hf version")
39
+
40
+ print(f"huggingface_hub version: {__version__}")
phivenv/Lib/site-packages/huggingface_hub/inference/__init__.py ADDED
File without changes
phivenv/Lib/site-packages/huggingface_hub/inference/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (169 Bytes). View file
 
phivenv/Lib/site-packages/huggingface_hub/inference/__pycache__/_common.cpython-39.pyc ADDED
Binary file (11.9 kB). View file
 
phivenv/Lib/site-packages/huggingface_hub/inference/_client.py ADDED
The diff for this file is too large to render. See raw diff
 
phivenv/Lib/site-packages/huggingface_hub/inference/_common.py ADDED
@@ -0,0 +1,457 @@
1
+ # coding=utf-8
2
+ # Copyright 2023-present, the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Contains utilities used by both the sync and async inference clients."""
16
+
17
+ import base64
18
+ import io
19
+ import json
20
+ import logging
21
+ import mimetypes
22
+ from contextlib import contextmanager
23
+ from dataclasses import dataclass
24
+ from pathlib import Path
25
+ from typing import (
26
+ TYPE_CHECKING,
27
+ Any,
28
+ AsyncIterable,
29
+ BinaryIO,
30
+ ContextManager,
31
+ Dict,
32
+ Generator,
33
+ Iterable,
34
+ List,
35
+ Literal,
36
+ NoReturn,
37
+ Optional,
38
+ Union,
39
+ overload,
40
+ )
41
+
42
+ from requests import HTTPError
43
+
44
+ from huggingface_hub.errors import (
45
+ GenerationError,
46
+ IncompleteGenerationError,
47
+ OverloadedError,
48
+ TextGenerationError,
49
+ UnknownError,
50
+ ValidationError,
51
+ )
52
+
53
+ from ..utils import get_session, is_aiohttp_available, is_numpy_available, is_pillow_available
54
+ from ._generated.types import ChatCompletionStreamOutput, TextGenerationStreamOutput
55
+
56
+
57
+ if TYPE_CHECKING:
58
+ from aiohttp import ClientResponse, ClientSession
59
+ from PIL.Image import Image
60
+
61
+ # TYPES
62
+ UrlT = str
63
+ PathT = Union[str, Path]
64
+ BinaryT = Union[bytes, BinaryIO]
65
+ ContentT = Union[BinaryT, PathT, UrlT, "Image"]
66
+
67
+ # Used to set an Accept: image/png header
68
+ TASKS_EXPECTING_IMAGES = {"text-to-image", "image-to-image"}
69
+
70
+ logger = logging.getLogger(__name__)
71
+
72
+
73
+ @dataclass
74
+ class RequestParameters:
75
+ url: str
76
+ task: str
77
+ model: Optional[str]
78
+ json: Optional[Union[str, Dict, List]]
79
+ data: Optional[ContentT]
80
+ headers: Dict[str, Any]
81
+
82
+
83
+ # Add dataclass for ModelStatus. We use this dataclass in get_model_status function.
84
+ @dataclass
85
+ class ModelStatus:
86
+ """
87
+ This Dataclass represents the model status in the HF Inference API.
88
+
89
+ Args:
90
+ loaded (`bool`):
91
+ If the model is currently loaded into HF's Inference API. Models
92
+ are loaded on-demand, leading to the user's first request taking longer.
93
+ If a model is loaded, you can be assured that it is in a healthy state.
94
+ state (`str`):
95
+ The current state of the model. This can be 'Loaded', 'Loadable', 'TooBig'.
96
+ If a model's state is 'Loadable', it's not too big and has a supported
97
+ backend. Loadable models are automatically loaded when the user first
98
+ requests inference on the endpoint. This means it is transparent for the
99
+ user to load a model, except that the first call takes longer to complete.
100
+ compute_type (`Dict`):
101
+ Information about the compute resource the model is using or will use, such as 'gpu' type and number of
102
+ replicas.
103
+ framework (`str`):
104
+ The name of the framework that the model was built with, such as 'transformers'
105
+ or 'text-generation-inference'.
106
+ """
107
+
108
+ loaded: bool
109
+ state: str
110
+ compute_type: Dict
111
+ framework: str
112
+
113
+
114
+ ## IMPORT UTILS
115
+
116
+
117
+ def _import_aiohttp():
118
+ # Make sure `aiohttp` is installed on the machine.
119
+ if not is_aiohttp_available():
120
+ raise ImportError("Please install aiohttp to use `AsyncInferenceClient` (`pip install aiohttp`).")
121
+ import aiohttp
122
+
123
+ return aiohttp
124
+
125
+
126
+ def _import_numpy():
127
+ """Make sure `numpy` is installed on the machine."""
128
+ if not is_numpy_available():
129
+ raise ImportError("Please install numpy to use deal with embeddings (`pip install numpy`).")
130
+ import numpy
131
+
132
+ return numpy
133
+
134
+
135
+ def _import_pil_image():
136
+ """Make sure `PIL` is installed on the machine."""
137
+ if not is_pillow_available():
138
+ raise ImportError(
139
+ "Please install Pillow to use deal with images (`pip install Pillow`). If you don't want the image to be"
140
+ " post-processed, use `client.post(...)` and get the raw response from the server."
141
+ )
142
+ from PIL import Image
143
+
144
+ return Image
145
+
+
+ ## ENCODING / DECODING UTILS
+
+
+ @overload
+ def _open_as_binary(
+     content: ContentT,
+ ) -> ContextManager[BinaryT]: ...  # means "if input is not None, output is not None"
+
+
+ @overload
+ def _open_as_binary(
+     content: Literal[None],
+ ) -> ContextManager[Literal[None]]: ...  # means "if input is None, output is None"
+
+
+ @contextmanager  # type: ignore
+ def _open_as_binary(content: Optional[ContentT]) -> Generator[Optional[BinaryT], None, None]:
+     """Open `content` as a binary file, either from a URL, a local path, raw bytes, or a PIL Image.
+
+     Do nothing if `content` is None.
+
+     TODO: handle base64 as input
+     """
+     # If content is a string => must be either a URL or a path
+     if isinstance(content, str):
+         if content.startswith("https://") or content.startswith("http://"):
+             logger.debug(f"Downloading content from {content}")
+             yield get_session().get(content).content  # TODO: retrieve as stream and pipe to post request ?
+             return
+         content = Path(content)
+         if not content.exists():
+             raise FileNotFoundError(
+                 f"File not found at {content}. If `data` is a string, it must either be a URL or a path to a local"
+                 " file. To pass raw content, please encode it as bytes first."
+             )
+
+     # If content is a Path => open it
+     if isinstance(content, Path):
+         logger.debug(f"Opening content from {content}")
+         with content.open("rb") as f:
+             yield f
+         return
+
+     # If content is a PIL Image => convert to bytes
+     if is_pillow_available():
+         from PIL import Image
+
+         if isinstance(content, Image.Image):
+             logger.debug("Converting PIL Image to bytes")
+             buffer = io.BytesIO()
+             content.save(buffer, format=content.format or "PNG")
+             yield buffer.getvalue()
+             return
+
+     # Otherwise: already a file-like object or None
+     yield content  # type: ignore
+
+
+ def _b64_encode(content: ContentT) -> str:
+     """Encode a raw file (image, audio) into base64. Can be bytes, an opened file, a path or a URL."""
+     with _open_as_binary(content) as data:
+         data_as_bytes = data if isinstance(data, bytes) else data.read()
+         return base64.b64encode(data_as_bytes).decode()
+
+
+ def _as_url(content: ContentT, default_mime_type: str) -> str:
+     if isinstance(content, str) and (content.startswith("https://") or content.startswith("http://")):
+         return content
+
+     # Handle MIME type detection for different content types
+     mime_type = None
+     if isinstance(content, (str, Path)):
+         mime_type = mimetypes.guess_type(content, strict=False)[0]
+     elif is_pillow_available():
+         from PIL import Image
+
+         if isinstance(content, Image.Image):
+             # Determine MIME type from PIL Image format, in sync with `_open_as_binary`
+             mime_type = f"image/{(content.format or 'PNG').lower()}"
+
+     mime_type = mime_type or default_mime_type
+     encoded_data = _b64_encode(content)
+     return f"data:{mime_type};base64,{encoded_data}"
+
+
+ def _b64_to_image(encoded_image: str) -> "Image":
+     """Parse a base64-encoded string into a PIL Image."""
+     Image = _import_pil_image()
+     return Image.open(io.BytesIO(base64.b64decode(encoded_image)))
+
+
+ def _bytes_to_list(content: bytes) -> List:
+     """Parse bytes from a Response object into a Python list.
+
+     Expects the response body to be JSON-encoded data.
+
+     NOTE: This is exactly the same implementation as `_bytes_to_dict` and will not complain if the returned data is a
+     dictionary. The only advantage of having both is to help the user (and mypy) understand what kind of data to expect.
+     """
+     return json.loads(content.decode())
+
+
+ def _bytes_to_dict(content: bytes) -> Dict:
+     """Parse bytes from a Response object into a Python dictionary.
+
+     Expects the response body to be JSON-encoded data.
+
+     NOTE: This is exactly the same implementation as `_bytes_to_list` and will not complain if the returned data is a
+     list. The only advantage of having both is to help the user (and mypy) understand what kind of data to expect.
+     """
+     return json.loads(content.decode())
+
+
+ def _bytes_to_image(content: bytes) -> "Image":
+     """Parse bytes from a Response object into a PIL Image.
+
+     Expects the response body to be raw bytes. To deal with b64 encoded images, use `_b64_to_image` instead.
+     """
+     Image = _import_pil_image()
+     return Image.open(io.BytesIO(content))
+
+
+ def _as_dict(response: Union[bytes, Dict]) -> Dict:
+     return json.loads(response) if isinstance(response, bytes) else response
+
+
+ ## PAYLOAD UTILS
+
+
+ ## STREAMING UTILS
+
+
+ def _stream_text_generation_response(
+     bytes_output_as_lines: Iterable[bytes], details: bool
+ ) -> Union[Iterable[str], Iterable[TextGenerationStreamOutput]]:
+     """Used in `InferenceClient.text_generation`."""
+     # Parse ServerSentEvents
+     for byte_payload in bytes_output_as_lines:
+         try:
+             output = _format_text_generation_stream_output(byte_payload, details)
+         except StopIteration:
+             break
+         if output is not None:
+             yield output
+
+
+ async def _async_stream_text_generation_response(
+     bytes_output_as_lines: AsyncIterable[bytes], details: bool
+ ) -> Union[AsyncIterable[str], AsyncIterable[TextGenerationStreamOutput]]:
+     """Used in `AsyncInferenceClient.text_generation`."""
+     # Parse ServerSentEvents
+     async for byte_payload in bytes_output_as_lines:
+         try:
+             output = _format_text_generation_stream_output(byte_payload, details)
+         except StopIteration:
+             break
+         if output is not None:
+             yield output
+
+
+ def _format_text_generation_stream_output(
+     byte_payload: bytes, details: bool
+ ) -> Optional[Union[str, TextGenerationStreamOutput]]:
+     if not byte_payload.startswith(b"data:"):
+         return None  # empty line
+
+     if byte_payload.strip() == b"data: [DONE]":
+         raise StopIteration("[DONE] signal received.")
+
+     # Decode payload
+     payload = byte_payload.decode("utf-8")
+     json_payload = json.loads(payload.lstrip("data:").rstrip("\n"))
+
+     # Either an error is being returned
+     if json_payload.get("error") is not None:
+         raise _parse_text_generation_error(json_payload["error"], json_payload.get("error_type"))
+
+     # Or parse the token payload
+     output = TextGenerationStreamOutput.parse_obj_as_instance(json_payload)
+     return output.token.text if not details else output
+
+
+ def _stream_chat_completion_response(
+     bytes_lines: Iterable[bytes],
+ ) -> Iterable[ChatCompletionStreamOutput]:
+     """Used in `InferenceClient.chat_completion` if model is served with TGI."""
+     for item in bytes_lines:
+         try:
+             output = _format_chat_completion_stream_output(item)
+         except StopIteration:
+             break
+         if output is not None:
+             yield output
+
+
+ async def _async_stream_chat_completion_response(
+     bytes_lines: AsyncIterable[bytes],
+ ) -> AsyncIterable[ChatCompletionStreamOutput]:
+     """Used in `AsyncInferenceClient.chat_completion`."""
+     async for item in bytes_lines:
+         try:
+             output = _format_chat_completion_stream_output(item)
+         except StopIteration:
+             break
+         if output is not None:
+             yield output
+
+
+ def _format_chat_completion_stream_output(
+     byte_payload: bytes,
+ ) -> Optional[ChatCompletionStreamOutput]:
+     if not byte_payload.startswith(b"data:"):
+         return None  # empty line
+
+     if byte_payload.strip() == b"data: [DONE]":
+         raise StopIteration("[DONE] signal received.")
+
+     # Decode payload
+     payload = byte_payload.decode("utf-8")
+     json_payload = json.loads(payload.lstrip("data:").rstrip("\n"))
+
+     # Either an error is being returned
+     if json_payload.get("error") is not None:
+         raise _parse_text_generation_error(json_payload["error"], json_payload.get("error_type"))
+
+     # Or parse the token payload
+     return ChatCompletionStreamOutput.parse_obj_as_instance(json_payload)
+
+
+ async def _async_yield_from(client: "ClientSession", response: "ClientResponse") -> AsyncIterable[bytes]:
+     try:
+         async for byte_payload in response.content:
+             yield byte_payload.strip()
+     finally:
+         # Always close the underlying HTTP session to avoid resource leaks
+         await client.close()
+
+
+ # "TGI servers" are servers running with the `text-generation-inference` backend.
+ # This backend is the go-to solution to run large language models at scale. However,
+ # for some smaller models (e.g. "gpt2") the default `transformers` + `api-inference`
+ # solution is still in use.
+ #
+ # Both approaches have very similar APIs, but not exactly the same. What we do first in
+ # the `text_generation` method is to assume the model is served via TGI. If we realize
+ # it's not the case (i.e. we receive an HTTP 400 Bad Request), we fall back to the
+ # default API with a warning message. When that's the case, we remember the unsupported
+ # attributes for this model in the `_UNSUPPORTED_TEXT_GENERATION_KWARGS` global variable.
+ #
+ # In addition, TGI servers have a built-in API route for chat-completion, which is not
+ # available on the default API. We use this route to provide a more consistent behavior
+ # when available.
+ #
+ # For more details, see https://github.com/huggingface/text-generation-inference and
+ # https://huggingface.co/docs/api-inference/detailed_parameters#text-generation-task.
+
+ _UNSUPPORTED_TEXT_GENERATION_KWARGS: Dict[Optional[str], List[str]] = {}
+
+
+ def _set_unsupported_text_generation_kwargs(model: Optional[str], unsupported_kwargs: List[str]) -> None:
+     _UNSUPPORTED_TEXT_GENERATION_KWARGS.setdefault(model, []).extend(unsupported_kwargs)
+
+
+ def _get_unsupported_text_generation_kwargs(model: Optional[str]) -> List[str]:
+     return _UNSUPPORTED_TEXT_GENERATION_KWARGS.get(model, [])
+
+
+ # TEXT GENERATION ERRORS
+ # ----------------------
+ # Text-generation errors are parsed separately to handle as much as possible the errors returned by the text generation
+ # inference project (https://github.com/huggingface/text-generation-inference).
+ # ----------------------
+
+
+ def raise_text_generation_error(http_error: HTTPError) -> NoReturn:
+     """
+     Try to parse a text-generation-inference error message and raise an HTTPError in any case.
+
+     Args:
+         http_error (`HTTPError`):
+             The HTTPError that has been raised.
+     """
+     # Try to parse a Text Generation Inference error
+
+     try:
+         # Hacky way to retrieve payload in case of aiohttp error
+         payload = getattr(http_error, "response_error_payload", None) or http_error.response.json()
+         error = payload.get("error")
+         error_type = payload.get("error_type")
+     except Exception:  # no payload
+         raise http_error
+
+     # If error_type => more information than `hf_raise_for_status`
+     if error_type is not None:
+         exception = _parse_text_generation_error(error, error_type)
+         raise exception from http_error
+
+     # Otherwise, fall back to the default error
+     raise http_error
+
+
+ def _parse_text_generation_error(error: Optional[str], error_type: Optional[str]) -> TextGenerationError:
+     if error_type == "generation":
+         return GenerationError(error)  # type: ignore
+     if error_type == "incomplete_generation":
+         return IncompleteGenerationError(error)  # type: ignore
+     if error_type == "overloaded":
+         return OverloadedError(error)  # type: ignore
+     if error_type == "validation":
+         return ValidationError(error)  # type: ignore
+     return UnknownError(error)  # type: ignore
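For orientation, here is a minimal sketch of how the content helpers defined above fit together. It is not part of the diff; it assumes `huggingface_hub` is installed and imports private helpers (`_open_as_binary`, `_b64_encode`, `_as_url`), whose import path is an internal detail that may change between releases.

from huggingface_hub.inference._common import _as_url, _b64_encode, _open_as_binary

raw = b"hello world"  # any raw bytes; a real image or audio file would be used in practice

with _open_as_binary(raw) as data:
    assert data == raw  # bytes pass straight through (no download, no file open)

encoded = _b64_encode(raw)  # base64 string, e.g. "aGVsbG8gd29ybGQ="
data_url = _as_url(raw, default_mime_type="text/plain")
# -> "data:text/plain;base64,aGVsbG8gd29ybGQ=" (raw bytes fall back to the default MIME type)

A path or an http(s) URL could be passed instead of raw bytes; `_open_as_binary` would then open the file or download the content, as shown in the function body above.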
phivenv/Lib/site-packages/huggingface_hub/inference/_generated/__init__.py ADDED
File without changes
phivenv/Lib/site-packages/huggingface_hub/inference/_generated/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (180 Bytes). View file
 
phivenv/Lib/site-packages/huggingface_hub/inference/_generated/_async_client.py ADDED
The diff for this file is too large to render. See raw diff
 
phivenv/Lib/site-packages/huggingface_hub/inference/_generated/types/__init__.py ADDED
@@ -0,0 +1,192 @@
+ # This file is auto-generated by `utils/generate_inference_types.py`.
+ # Do not modify it manually.
+ #
+ # ruff: noqa: F401
+
+ from .audio_classification import (
+     AudioClassificationInput,
+     AudioClassificationOutputElement,
+     AudioClassificationOutputTransform,
+     AudioClassificationParameters,
+ )
+ from .audio_to_audio import AudioToAudioInput, AudioToAudioOutputElement
+ from .automatic_speech_recognition import (
+     AutomaticSpeechRecognitionEarlyStoppingEnum,
+     AutomaticSpeechRecognitionGenerationParameters,
+     AutomaticSpeechRecognitionInput,
+     AutomaticSpeechRecognitionOutput,
+     AutomaticSpeechRecognitionOutputChunk,
+     AutomaticSpeechRecognitionParameters,
+ )
+ from .base import BaseInferenceType
+ from .chat_completion import (
+     ChatCompletionInput,
+     ChatCompletionInputFunctionDefinition,
+     ChatCompletionInputFunctionName,
+     ChatCompletionInputGrammarType,
+     ChatCompletionInputJSONSchema,
+     ChatCompletionInputMessage,
+     ChatCompletionInputMessageChunk,
+     ChatCompletionInputMessageChunkType,
+     ChatCompletionInputResponseFormatJSONObject,
+     ChatCompletionInputResponseFormatJSONSchema,
+     ChatCompletionInputResponseFormatText,
+     ChatCompletionInputStreamOptions,
+     ChatCompletionInputTool,
+     ChatCompletionInputToolCall,
+     ChatCompletionInputToolChoiceClass,
+     ChatCompletionInputToolChoiceEnum,
+     ChatCompletionInputURL,
+     ChatCompletionOutput,
+     ChatCompletionOutputComplete,
+     ChatCompletionOutputFunctionDefinition,
+     ChatCompletionOutputLogprob,
+     ChatCompletionOutputLogprobs,
+     ChatCompletionOutputMessage,
+     ChatCompletionOutputToolCall,
+     ChatCompletionOutputTopLogprob,
+     ChatCompletionOutputUsage,
+     ChatCompletionStreamOutput,
+     ChatCompletionStreamOutputChoice,
+     ChatCompletionStreamOutputDelta,
+     ChatCompletionStreamOutputDeltaToolCall,
+     ChatCompletionStreamOutputFunction,
+     ChatCompletionStreamOutputLogprob,
+     ChatCompletionStreamOutputLogprobs,
+     ChatCompletionStreamOutputTopLogprob,
+     ChatCompletionStreamOutputUsage,
+ )
+ from .depth_estimation import DepthEstimationInput, DepthEstimationOutput
+ from .document_question_answering import (
+     DocumentQuestionAnsweringInput,
+     DocumentQuestionAnsweringInputData,
+     DocumentQuestionAnsweringOutputElement,
+     DocumentQuestionAnsweringParameters,
+ )
+ from .feature_extraction import FeatureExtractionInput, FeatureExtractionInputTruncationDirection
+ from .fill_mask import FillMaskInput, FillMaskOutputElement, FillMaskParameters
+ from .image_classification import (
+     ImageClassificationInput,
+     ImageClassificationOutputElement,
+     ImageClassificationOutputTransform,
+     ImageClassificationParameters,
+ )
+ from .image_segmentation import (
+     ImageSegmentationInput,
+     ImageSegmentationOutputElement,
+     ImageSegmentationParameters,
+     ImageSegmentationSubtask,
+ )
+ from .image_to_image import ImageToImageInput, ImageToImageOutput, ImageToImageParameters, ImageToImageTargetSize
+ from .image_to_text import (
+     ImageToTextEarlyStoppingEnum,
+     ImageToTextGenerationParameters,
+     ImageToTextInput,
+     ImageToTextOutput,
+     ImageToTextParameters,
+ )
+ from .image_to_video import ImageToVideoInput, ImageToVideoOutput, ImageToVideoParameters, ImageToVideoTargetSize
+ from .object_detection import (
+     ObjectDetectionBoundingBox,
+     ObjectDetectionInput,
+     ObjectDetectionOutputElement,
+     ObjectDetectionParameters,
+ )
+ from .question_answering import (
+     QuestionAnsweringInput,
+     QuestionAnsweringInputData,
+     QuestionAnsweringOutputElement,
+     QuestionAnsweringParameters,
+ )
+ from .sentence_similarity import SentenceSimilarityInput, SentenceSimilarityInputData
+ from .summarization import (
+     SummarizationInput,
+     SummarizationOutput,
+     SummarizationParameters,
+     SummarizationTruncationStrategy,
+ )
+ from .table_question_answering import (
+     Padding,
+     TableQuestionAnsweringInput,
+     TableQuestionAnsweringInputData,
+     TableQuestionAnsweringOutputElement,
+     TableQuestionAnsweringParameters,
+ )
+ from .text2text_generation import (
+     Text2TextGenerationInput,
+     Text2TextGenerationOutput,
+     Text2TextGenerationParameters,
+     Text2TextGenerationTruncationStrategy,
+ )
+ from .text_classification import (
+     TextClassificationInput,
+     TextClassificationOutputElement,
+     TextClassificationOutputTransform,
+     TextClassificationParameters,
+ )
+ from .text_generation import (
+     TextGenerationInput,
+     TextGenerationInputGenerateParameters,
+     TextGenerationInputGrammarType,
+     TextGenerationOutput,
+     TextGenerationOutputBestOfSequence,
+     TextGenerationOutputDetails,
+     TextGenerationOutputFinishReason,
+     TextGenerationOutputPrefillToken,
+     TextGenerationOutputToken,
+     TextGenerationStreamOutput,
+     TextGenerationStreamOutputStreamDetails,
+     TextGenerationStreamOutputToken,
+     TypeEnum,
+ )
+ from .text_to_audio import (
+     TextToAudioEarlyStoppingEnum,
+     TextToAudioGenerationParameters,
+     TextToAudioInput,
+     TextToAudioOutput,
+     TextToAudioParameters,
+ )
+ from .text_to_image import TextToImageInput, TextToImageOutput, TextToImageParameters
+ from .text_to_speech import (
+     TextToSpeechEarlyStoppingEnum,
+     TextToSpeechGenerationParameters,
+     TextToSpeechInput,
+     TextToSpeechOutput,
+     TextToSpeechParameters,
+ )
+ from .text_to_video import TextToVideoInput, TextToVideoOutput, TextToVideoParameters
+ from .token_classification import (
+     TokenClassificationAggregationStrategy,
+     TokenClassificationInput,
+     TokenClassificationOutputElement,
+     TokenClassificationParameters,
+ )
+ from .translation import TranslationInput, TranslationOutput, TranslationParameters, TranslationTruncationStrategy
+ from .video_classification import (
+     VideoClassificationInput,
+     VideoClassificationOutputElement,
+     VideoClassificationOutputTransform,
+     VideoClassificationParameters,
+ )
+ from .visual_question_answering import (
+     VisualQuestionAnsweringInput,
+     VisualQuestionAnsweringInputData,
+     VisualQuestionAnsweringOutputElement,
+     VisualQuestionAnsweringParameters,
+ )
+ from .zero_shot_classification import (
+     ZeroShotClassificationInput,
+     ZeroShotClassificationOutputElement,
+     ZeroShotClassificationParameters,
+ )
+ from .zero_shot_image_classification import (
+     ZeroShotImageClassificationInput,
+     ZeroShotImageClassificationOutputElement,
+     ZeroShotImageClassificationParameters,
+ )
+ from .zero_shot_object_detection import (
+     ZeroShotObjectDetectionBoundingBox,
+     ZeroShotObjectDetectionInput,
+     ZeroShotObjectDetectionOutputElement,
+     ZeroShotObjectDetectionParameters,
+ )
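The module above only re-exports the per-task dataclasses so callers can import them from a single namespace. A minimal sketch of typical usage follows (the payload is illustrative; it assumes `TextClassificationOutputElement` exposes `label` and `score` fields, as its name suggests):

from huggingface_hub.inference._generated.types import TextClassificationOutputElement

# Generated types inherit from BaseInferenceType, which can build instances from plain dicts
# (the same `parse_obj_as_instance` helper used by `_common.py` above).
element = TextClassificationOutputElement.parse_obj_as_instance({"label": "POSITIVE", "score": 0.98})
print(element.label, element.score)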
phivenv/Lib/site-packages/huggingface_hub/inference/_generated/types/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (7.48 kB). View file