Upload folder using huggingface_hub
README.md CHANGED
@@ -5,7 +5,7 @@ emoji: 🔥
 colorFrom: indigo
 colorTo: indigo
 sdk: gradio
-sdk_version:
+sdk_version: 6.0.0
 app_file: run.py
 pinned: false
 hf_oauth: true
requirements.txt CHANGED
@@ -1,4 +1,4 @@
-gradio-client @ git+https://github.com/gradio-app/gradio@
-https://gradio-pypi-previews.s3.amazonaws.com/
+gradio-client @ git+https://github.com/gradio-app/gradio@d007e6cf617baba5c62e49ec2b7ce278aa863a79#subdirectory=client/python
+https://gradio-pypi-previews.s3.amazonaws.com/d007e6cf617baba5c62e49ec2b7ce278aa863a79/gradio-6.0.0-py3-none-any.whl
 opencv-python
 numpy
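The updated requirements pin gradio-client to a single commit of the Gradio repository (installed from its client/python subdirectory) and install gradio itself from a prebuilt 6.0.0 preview wheel; both lines reference the same commit hash, so the client and the server library stay in sync. A small, optional runtime check (an illustrative sketch, not part of the Space) can confirm which versions actually ended up in the environment:

from importlib.metadata import PackageNotFoundError, version

# Optional sanity check: print the versions that pip actually installed
# from the pinned preview wheel and the commit-pinned client package.
for dist in ("gradio", "gradio_client"):
    try:
        print(dist, version(dist))
    except PackageNotFoundError:
        print(dist, "not installed")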
run.ipynb CHANGED
@@ -1 +1 @@
-
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: streaming_filter_unified"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio opencv-python numpy "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "import cv2 # type: ignore\n", "\n", "def transform_cv2(frame, transform):\n", " if transform == \"cartoon\":\n", " # prepare color\n", " img_color = cv2.pyrDown(cv2.pyrDown(frame))\n", " for _ in range(6):\n", " img_color = cv2.bilateralFilter(img_color, 9, 9, 7)\n", " img_color = cv2.pyrUp(cv2.pyrUp(img_color))\n", "\n", " # prepare edges\n", " img_edges = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n", " img_edges = cv2.adaptiveThreshold(\n", " cv2.medianBlur(img_edges, 7),\n", " 255,\n", " cv2.ADAPTIVE_THRESH_MEAN_C,\n", " cv2.THRESH_BINARY,\n", " 9,\n", " 2,\n", " )\n", " img_edges = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2RGB)\n", " # combine color and edges\n", " img = cv2.bitwise_and(img_color, img_edges)\n", " return img\n", " elif transform == \"edges\":\n", " # perform edge detection\n", " img = cv2.cvtColor(cv2.Canny(frame, 100, 200), cv2.COLOR_GRAY2BGR)\n", " return img\n", " else:\n", " return np.flipud(frame)\n", "\n", "\n", "css=\"\"\".my-group {max-width: 500px !important; max-height: 500px !important;}\n", " .my-column {display: flex !important; justify-content: center !important; align-items: center !important};\"\"\"\n", "\n", "with gr.Blocks(
+
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: streaming_filter_unified"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio opencv-python numpy "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "import cv2 # type: ignore\n", "\n", "def transform_cv2(frame, transform):\n", " if transform == \"cartoon\":\n", " # prepare color\n", " img_color = cv2.pyrDown(cv2.pyrDown(frame))\n", " for _ in range(6):\n", " img_color = cv2.bilateralFilter(img_color, 9, 9, 7)\n", " img_color = cv2.pyrUp(cv2.pyrUp(img_color))\n", "\n", " # prepare edges\n", " img_edges = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n", " img_edges = cv2.adaptiveThreshold(\n", " cv2.medianBlur(img_edges, 7),\n", " 255,\n", " cv2.ADAPTIVE_THRESH_MEAN_C,\n", " cv2.THRESH_BINARY,\n", " 9,\n", " 2,\n", " )\n", " img_edges = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2RGB)\n", " # combine color and edges\n", " img = cv2.bitwise_and(img_color, img_edges)\n", " return img\n", " elif transform == \"edges\":\n", " # perform edge detection\n", " img = cv2.cvtColor(cv2.Canny(frame, 100, 200), cv2.COLOR_GRAY2BGR)\n", " return img\n", " else:\n", " return np.flipud(frame)\n", "\n", "\n", "css=\"\"\".my-group {max-width: 500px !important; max-height: 500px !important;}\n", " .my-column {display: flex !important; justify-content: center !important; align-items: center !important};\"\"\"\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Column(elem_classes=[\"my-column\"]):\n", " with gr.Group(elem_classes=[\"my-group\"]):\n", " transform = gr.Dropdown(choices=[\"cartoon\", \"edges\", \"flip\"],\n", " value=\"flip\", label=\"Transformation\")\n", " input_img = gr.Image(sources=[\"webcam\"], type=\"numpy\", streaming=True)\n", " input_img.stream(transform_cv2, [input_img, transform], [input_img], time_limit=30, stream_every=0.1)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch(css=css)\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
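The regenerated notebook is the same demo as run.py: transform_cv2 is applied to each incoming webcam frame through input_img.stream(...), with stream_every=0.1 sending a frame roughly every 100 ms and time_limit=30 capping a streaming session at 30 seconds. Because the filter itself is plain NumPy/OpenCV, it can be exercised without a webcam; a minimal offline check (assuming transform_cv2 from the notebook cell above is in scope, with a synthetic frame standing in for the camera) might look like:

import numpy as np
import cv2

# Synthetic RGB frame standing in for a webcam capture; dimensions are
# divisible by 4 so the pyrDown/pyrUp round trip returns the original shape.
frame = np.zeros((240, 320, 3), dtype=np.uint8)
cv2.rectangle(frame, (80, 60), (240, 180), (255, 255, 255), thickness=-1)

# transform_cv2 is assumed to be defined exactly as in the notebook cell above.
for mode in ("cartoon", "edges", "flip"):
    out = transform_cv2(frame, mode)
    print(mode, out.shape, out.dtype)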
run.py CHANGED
@@ -35,7 +35,7 @@ def transform_cv2(frame, transform):
 css=""".my-group {max-width: 500px !important; max-height: 500px !important;}
 .my-column {display: flex !important; justify-content: center !important; align-items: center !important};"""
 
-with gr.Blocks(css=css) as demo:
+with gr.Blocks() as demo:
     with gr.Column(elem_classes=["my-column"]):
         with gr.Group(elem_classes=["my-group"]):
             transform = gr.Dropdown(choices=["cartoon", "edges", "flip"],
@@ -45,4 +45,4 @@ with gr.Blocks(css=css) as demo:
 
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch(css=css)
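The run.py change is purely about where the custom CSS is supplied: the css string moves out of the gr.Blocks(...) constructor and into demo.launch(css=css), matching the bump to the 6.0.0 preview wheel pinned in requirements.txt. Stripped of the webcam demo, the relocated pattern looks like this minimal sketch (illustrative only, not part of the Space):

import gradio as gr

css = """.my-group {max-width: 500px !important;}"""

with gr.Blocks() as demo:  # css is no longer passed to the Blocks constructor
    gr.Markdown("Hello", elem_classes=["my-group"])

if __name__ == "__main__":
    demo.launch(css=css)  # css is supplied at launch time instead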