Commit 778bc8e
Parent(s): 063cbeb

openvino only

Files changed:
- app.py (+21 -20)
- benchmark.log (+0 -41)
- requirements.txt (+1 -1)
app.py CHANGED

@@ -10,17 +10,17 @@ from config_store import (
     get_inference_config,
     get_openvino_config,
     get_pytorch_config,
-    get_ipex_config,
+    # get_ipex_config,
 )
 from optimum_benchmark.launchers.base import Launcher  # noqa
 from optimum_benchmark.backends.openvino.utils import TASKS_TO_OVMODEL
 from optimum_benchmark.backends.transformers_utils import TASKS_TO_MODEL_LOADERS
-from optimum_benchmark.backends.ipex.utils import TASKS_TO_IPEXMODEL
+# from optimum_benchmark.backends.ipex.utils import TASKS_TO_IPEXMODEL
 from optimum_benchmark import (
     BenchmarkConfig,
     PyTorchConfig,
     OVConfig,
-    IPEXConfig,
+    # IPEXConfig,
     ProcessConfig,
     InferenceConfig,
     Benchmark,
@@ -31,21 +31,22 @@ from optimum_benchmark.logging_utils import setup_logging
 DEVICE = "cpu"
 LAUNCHER = "process"
 SCENARIO = "inference"
-BACKENDS = ["openvino", "pytorch", "ipex"]
+BACKENDS = ["openvino", "pytorch"]
 MODELS = [
     "google-bert/bert-base-uncased",
     "openai-community/gpt2",
 ]
 TASKS = (
     set(TASKS_TO_OVMODEL.keys())
-    & set(TASKS_TO_IPEXMODEL.keys())
+    # & set(TASKS_TO_IPEXMODEL.keys())
     & set(TASKS_TO_MODEL_LOADERS.keys())
 )
 
 
 def run_benchmark(kwargs, oauth_token: gr.OAuthToken):
     if oauth_token.token is None:
-
+        gr.Error("Please login to be able to run the benchmark.")
+        return tuple(None for _ in BACKENDS)
 
     timestamp = time.strftime("%Y-%m-%d-%H-%M-%S")
     username = whoami(oauth_token.token)["name"]
@@ -60,7 +61,7 @@ def run_benchmark(kwargs, oauth_token: gr.OAuthToken):
         "inference": {},
         "openvino": {},
         "pytorch": {},
-        "ipex": {},
+        # "ipex": {},
     }
 
     for key, value in kwargs.items():
@@ -91,17 +92,17 @@ def run_benchmark(kwargs, oauth_token: gr.OAuthToken):
         device=DEVICE,
         **configs["pytorch"],
     )
-    configs["ipex"] = IPEXConfig(
-        task=task,
-        model=model,
-        device=DEVICE,
-        **configs["ipex"],
-    )
+    # configs["ipex"] = IPEXConfig(
+    #     task=task,
+    #     model=model,
+    #     device=DEVICE,
+    #     **configs["ipex"],
+    # )
 
     outputs = {
         "openvino": "Running benchmark for OpenVINO backend",
         "pytorch": "Running benchmark for PyTorch backend",
-        "ipex": "Running benchmark for IPEX backend",
+        # "ipex": "Running benchmark for IPEX backend",
     }
 
     yield tuple(outputs[b] for b in BACKENDS)
@@ -199,8 +200,8 @@ def build_demo():
             openvino_config = get_openvino_config()
         with gr.Accordion(label="PyTorch Config", open=False, visible=True):
             pytorch_config = get_pytorch_config()
-        with gr.Accordion(label="IPEX Config", open=False, visible=True):
-            ipex_config = get_ipex_config()
+        # with gr.Accordion(label="IPEX Config", open=False, visible=True):
+        #     ipex_config = get_ipex_config()
 
         backends.change(
             inputs=backends,
@@ -218,8 +219,8 @@ def build_demo():
             openvino_output = gr.Markdown()
         with gr.Accordion(label="PyTorch Output", open=True, visible=True):
             pytorch_output = gr.Markdown()
-        with gr.Accordion(label="IPEX Output", open=True, visible=True):
-            ipex_output = gr.Markdown()
+        # with gr.Accordion(label="IPEX Output", open=True, visible=True):
+        #     ipex_output = gr.Markdown()
 
         backends.change(
             inputs=backends,
@@ -239,12 +240,12 @@ def build_demo():
                 *inference_config.values(),
                 *openvino_config.values(),
                 *pytorch_config.values(),
-                *ipex_config.values(),
+                # *ipex_config.values(),
             },
             outputs={
                 openvino_output,
                 pytorch_output,
-                ipex_output,
+                # ipex_output,
             },
             concurrency_limit=1,
         )
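For reference, everything this app assembles from the Gradio widgets maps onto optimum-benchmark's Python API. Below is a minimal sketch of the OpenVINO-only path the Space now exercises, assuming the library's documented Benchmark.launch usage; the task, model, and benchmark name are illustrative stand-ins for values that really come from the UI:

from optimum_benchmark import (
    Benchmark,
    BenchmarkConfig,
    InferenceConfig,
    OVConfig,
    ProcessConfig,
)

# The same three pieces app.py builds from the UI inputs:
# a process launcher, an inference scenario, and the OpenVINO backend.
launcher_config = ProcessConfig()
scenario_config = InferenceConfig(latency=True, memory=True)
backend_config = OVConfig(
    task="text-generation",         # illustrative; app.py derives tasks from TASKS
    model="openai-community/gpt2",  # one of the entries in MODELS
    device="cpu",                   # the Space pins DEVICE = "cpu"
)

benchmark_config = BenchmarkConfig(
    name="openvino-gpt2",           # illustrative name
    launcher=launcher_config,
    scenario=scenario_config,
    backend=backend_config,
)

# Runs the benchmark in an isolated process and returns a report;
# this is the run that produces output like the deleted benchmark.log below.
benchmark_report = Benchmark.launch(benchmark_config)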
benchmark.log DELETED

@@ -1,41 +0,0 @@
-[ISOLATED-PROCESS][2024-09-25 18:53:27,439][pytorch][INFO] - Allocating pytorch backend
-[ISOLATED-PROCESS][2024-09-25 18:53:27,439][pytorch][INFO] - + Seeding backend with 42
-[ISOLATED-PROCESS][2024-09-25 18:53:27,440][pytorch][INFO] - + Benchmarking a Transformers model
-[ISOLATED-PROCESS][2024-09-25 18:53:28,549][inference][INFO] - Allocating inference scenario
-[ISOLATED-PROCESS][2024-09-25 18:53:28,549][inference][INFO] - + Creating input generator
-[ISOLATED-PROCESS][2024-09-25 18:53:28,550][inference][INFO] - + Generating Inference inputs
-[ISOLATED-PROCESS][2024-09-25 18:53:28,550][inference][INFO] - + Initializing Inference report
-[ISOLATED-PROCESS][2024-09-25 18:53:28,551][inference][INFO] - + Preparing input shapes for Inference
-[ISOLATED-PROCESS][2024-09-25 18:53:28,551][inference][INFO] - + Running model loading tracking
-[ISOLATED-PROCESS][2024-09-25 18:53:28,551][latency][INFO] - + Tracking latency using CPU performance counter
-[ISOLATED-PROCESS][2024-09-25 18:53:28,551][inference][INFO] - + Loading model for Inference
-[ISOLATED-PROCESS][2024-09-25 18:53:28,552][pytorch][INFO] - + Creating backend temporary directory
-[ISOLATED-PROCESS][2024-09-25 18:53:28,553][pytorch][INFO] - + Loading model with pretrained weights
-[ISOLATED-PROCESS][2024-09-25 18:53:28,554][pytorch][INFO] - + Loading Transformers model
-[ISOLATED-PROCESS][2024-09-25 18:53:28,881][pytorch][INFO] - + Enabling eval mode
-[ISOLATED-PROCESS][2024-09-25 18:53:28,883][pytorch][INFO] - + Cleaning up backend temporary directory
-[ISOLATED-PROCESS][2024-09-25 18:53:28,884][inference][INFO] - + Preparing inputs for Inference
-[ISOLATED-PROCESS][2024-09-25 18:53:28,885][inference][INFO] - + Warming up backend for Inference
-[ISOLATED-PROCESS][2024-09-25 18:53:29,252][inference][INFO] - + Running Inference latency tracking
-[ISOLATED-PROCESS][2024-09-25 18:53:29,252][latency][INFO] - + Tracking latency using CPU performance counter
-[ISOLATED-PROCESS][2024-09-25 18:53:39,282][latency][INFO] - + load latency:
-[ISOLATED-PROCESS][2024-09-25 18:53:39,282][latency][INFO] - - count: 1
-[ISOLATED-PROCESS][2024-09-25 18:53:39,283][latency][INFO] - - total: 0.331631 s
-[ISOLATED-PROCESS][2024-09-25 18:53:39,283][latency][INFO] - - mean: 0.331631 s
-[ISOLATED-PROCESS][2024-09-25 18:53:39,283][latency][INFO] - - stdev: 0.000000 s (0.00%)
-[ISOLATED-PROCESS][2024-09-25 18:53:39,284][latency][INFO] - - p50: 0.331631 s
-[ISOLATED-PROCESS][2024-09-25 18:53:39,284][latency][INFO] - - p90: 0.331631 s
-[ISOLATED-PROCESS][2024-09-25 18:53:39,284][latency][INFO] - - p95: 0.331631 s
-[ISOLATED-PROCESS][2024-09-25 18:53:39,284][latency][INFO] - - p99: 0.331631 s
-[ISOLATED-PROCESS][2024-09-25 18:53:39,285][latency][INFO] - + forward latency:
-[ISOLATED-PROCESS][2024-09-25 18:53:39,285][latency][INFO] - - count: 266
-[ISOLATED-PROCESS][2024-09-25 18:53:39,285][latency][INFO] - - total: 10.021863 s
-[ISOLATED-PROCESS][2024-09-25 18:53:39,285][latency][INFO] - - mean: 0.037676 s
-[ISOLATED-PROCESS][2024-09-25 18:53:39,286][latency][INFO] - - stdev: 0.008922 s (23.68%)
-[ISOLATED-PROCESS][2024-09-25 18:53:39,286][latency][INFO] - - p50: 0.035970 s
-[ISOLATED-PROCESS][2024-09-25 18:53:39,286][latency][INFO] - - p90: 0.043994 s
-[ISOLATED-PROCESS][2024-09-25 18:53:39,286][latency][INFO] - - p95: 0.046884 s
-[ISOLATED-PROCESS][2024-09-25 18:53:39,287][latency][INFO] - - p99: 0.073021 s
-[ISOLATED-PROCESS][2024-09-25 18:53:39,288][latency][INFO] - + forward throughput: 53.083941 samples/s
-[ISOLATED-PROCESS][2024-09-25 18:53:39,296][process][INFO] - + Sending report to main process
-[ISOLATED-PROCESS][2024-09-25 18:53:39,297][process][INFO] - + Exiting isolated process
requirements.txt CHANGED

@@ -1 +1 @@
-optimum-benchmark[openvino
+optimum-benchmark[openvino]@git+https://github.com/huggingface/optimum-benchmark.git@markdown-report
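The new pin installs optimum-benchmark with only the OpenVINO extra, straight from the markdown-report branch. For a local reproduction the equivalent command (assuming a pip recent enough to accept PEP 508 direct references) would be pip install "optimum-benchmark[openvino]@git+https://github.com/huggingface/optimum-benchmark.git@markdown-report".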