Try to fix execution code
app.py CHANGED
@@ -105,8 +105,8 @@ def process_func(x: np.ndarray, sampling_rate: int) -> dict:
 def recognize(input_file):
     # sampling_rate, signal = input_microphone
     # signal = signal.astype(np.float32, order="C") / 32768.0
-    if
-    signal, sampling_rate = audiofile.read(
+    if input_file is not None:
+        signal, sampling_rate = audiofile.read(input_file, duration=duration)
     else:
         raise gr.Error(
             "No audio file submitted! "
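For reference, the guard added above can be read as the following minimal sketch. It assumes a module-level `duration` constant (the new UI text below says only the first second is processed) and stubs `process_func`, whose signature comes from the hunk header; the app's real implementation is not shown here.

import audiofile
import gradio as gr
import numpy as np

duration = 1  # assumption: read only the first second, matching the UI note in the next hunk


def process_func(x: np.ndarray, sampling_rate: int) -> dict:
    """Stub with the signature from the hunk header; the real model wrapper lives in app.py."""
    return {"age": 0.0, "gender": {"female": 0.5, "male": 0.5}}


def recognize(input_file):
    # gr.Audio with type="filepath" passes a path, or None when nothing was submitted.
    if input_file is not None:
        # Read at most `duration` seconds of audio from disk.
        signal, sampling_rate = audiofile.read(input_file, duration=duration)
    else:
        # Surface a user-facing error in the Gradio UI instead of crashing later.
        raise gr.Error("No audio file submitted!")
    return process_func(signal, sampling_rate)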
@@ -154,59 +154,24 @@ allow_flagging = "never"
 # # demo.launch()
 # file.launch()
 
-def toggle_input(choice):
-    if choice == "microphone":
-        return gr.update(visible=True), gr.update(visible=False)
-    else:
-        return gr.update(visible=False), gr.update(visible=True)
-
 
 with gr.Blocks() as demo:
     gr.Markdown(description)
-    with gr.Tab(label="
+    with gr.Tab(label="Speech analysis"):
         with gr.Row():
+            gr.Markdown("Only the first second of the audio is processed.")
             with gr.Column():
-
-                # ["microphone", "file"],
-                # value="file",
-                # label="How would you like to upload your audio?",
-                # )
-                input_file = gr.Audio(
+                input = gr.Audio(
                     sources=["upload", "microphone"],
                     type="filepath",
-                    label="Audio
+                    label="Audio input",
                 )
-                # input_microphone = gr.Audio(
-                #     sources="microphone",
-                #     type="filepath",
-                #     label="Microphone",
-                # )
-
-                # output_selector = gr.Dropdown(
-                #     choices=["age", "gender"],
-                #     label="Output",
-                #     value="age",
-                # )
                 submit_btn = gr.Button(value="Submit")
             with gr.Column():
                 output_age = gr.Textbox(label="Age")
-                output_gender = gr.Label(label="
-
-                # def update_output(output_selector):
-                #     """Set different output types for different model outputs."""
-                #     if output_selector == "gender":
-                #         output = gr.Label(label="gender")
-                #     return output
-
-                # output_selector.input(update_output, output_selector, output)
-
-    outputs = [output_age, output_gender]
-
-    # input_selection.change(toggle_input, input_selection, inputs)
-    # input_microphone.change(lambda x: x, input_microphone, outputs)
-    # input_file.change(lambda x: x, input_file, outputs)
+                output_gender = gr.Label(label="Gender")
 
-    submit_btn.click(recognize,
+    submit_btn.click(recognize, input, [output_age, output_gender])
 
 
 demo.launch(debug=True)
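Applied to the file, the new hunk builds the interface roughly as follows. This is a self-contained sketch: `description` and `recognize` are stubbed placeholders for the definitions earlier in app.py, and the `input` name mirrors the diff (it shadows the Python builtin).

import gradio as gr

description = "Age and gender prediction from speech."  # placeholder for the app's description string


def recognize(input_file):
    """Placeholder for the handler defined earlier in app.py."""
    return "25-30", {"female": 0.7, "male": 0.3}


with gr.Blocks() as demo:
    gr.Markdown(description)
    with gr.Tab(label="Speech analysis"):
        with gr.Row():
            gr.Markdown("Only the first second of the audio is processed.")
            with gr.Column():
                input = gr.Audio(
                    sources=["upload", "microphone"],
                    type="filepath",
                    label="Audio input",
                )
                submit_btn = gr.Button(value="Submit")
            with gr.Column():
                output_age = gr.Textbox(label="Age")
                output_gender = gr.Label(label="Gender")

    # One input component, two output components, wired to the same handler.
    submit_btn.click(recognize, input, [output_age, output_gender])

demo.launch(debug=True)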